diff --git a/.github/scripts/check_diff.py b/.github/scripts/check_diff.py index d950da6..85ee197 100644 --- a/.github/scripts/check_diff.py +++ b/.github/scripts/check_diff.py @@ -2,10 +2,10 @@ import sys from typing import Dict -LIB_DIRS = ["libs/mongodb"] +LIB_DIRS = ["libs/mongodb", "libs/langgraph-checkpoint-mongodb"] if __name__ == "__main__": - files = sys.argv[1:] + files = sys.argv[1:] # changed files dirs_to_run: Dict[str, set] = { "lint": set(), @@ -26,13 +26,15 @@ ".github/scripts/check_diff.py", ) ): - # add all LANGCHAIN_DIRS for infra changes + # add all LIB_DIRS for infra changes dirs_to_run["test"].update(LIB_DIRS) if any(file.startswith(dir_) for dir_ in LIB_DIRS): for dir_ in LIB_DIRS: if file.startswith(dir_): dirs_to_run["test"].add(dir_) + if "langgraph-checkpoint-mongodb/tests" not in file: + dirs_to_run["lint"].add(dir_) elif file.startswith("libs/"): raise ValueError( f"Unknown lib: {file}. check_diff.py likely needs " @@ -40,7 +42,7 @@ ) outputs = { - "dirs-to-lint": list(dirs_to_run["lint"] | dirs_to_run["test"]), + "dirs-to-lint": list(dirs_to_run["lint"]), "dirs-to-test": list(dirs_to_run["test"]), } for key, value in outputs.items(): diff --git a/.github/workflows/extract_ignored_words_list.py b/.github/scripts/extract_ignored_words_list.py similarity index 100% rename from .github/workflows/extract_ignored_words_list.py rename to .github/scripts/extract_ignored_words_list.py diff --git a/.github/workflows/_codespell.yml b/.github/workflows/_codespell.yml index fc81ef7..53e4dc4 100644 --- a/.github/workflows/_codespell.yml +++ b/.github/workflows/_codespell.yml @@ -29,7 +29,7 @@ jobs: working-directory: ${{ inputs.working-directory }} run: | # Use a Python script to extract the ignore words list from pyproject.toml - python ../../.github/workflows/extract_ignored_words_list.py + python ../../.github/scripts/extract_ignored_words_list.py id: extract_ignore_words - name: Codespell diff --git 
a/.github/workflows/_compile_integration_test.yml b/.github/workflows/_compile_integration_test.yml deleted file mode 100644 index 6f0d790..0000000 --- a/.github/workflows/_compile_integration_test.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: compile-integration-test - -on: - workflow_call: - inputs: - working-directory: - required: true - type: string - description: "From which folder this pipeline executes" - -env: - POETRY_VERSION: "1.7.1" - -jobs: - build: - defaults: - run: - working-directory: ${{ inputs.working-directory }} - runs-on: ubuntu-latest - strategy: - matrix: - python-version: - - "3.9" - - "3.12" - name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}" - steps: - - uses: actions/checkout@v4 - - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_setup" - with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - working-directory: ${{ inputs.working-directory }} - cache-key: compile-integration - - - name: Install integration dependencies - shell: bash - run: poetry install --with=test_integration,test - - - name: Check integration tests compile - shell: bash - run: poetry run pytest -m compile tests/integration_tests - - - name: Ensure the tests did not create any additional files - shell: bash - run: | - set -eu - - STATUS="$(git status)" - echo "$STATUS" - - # grep will exit non-zero if the target message isn't found, - # and `set -e` above will cause the step to fail. - echo "$STATUS" | grep 'nothing to commit, working tree clean' diff --git a/.github/workflows/_lint.yml b/.github/workflows/_lint.yml index 7a2f9b1..73588f8 100644 --- a/.github/workflows/_lint.yml +++ b/.github/workflows/_lint.yml @@ -65,7 +65,7 @@ jobs: # It doesn't matter how you change it, any change will cause a cache-bust. 
working-directory: ${{ inputs.working-directory }} run: | - poetry install --with lint,typing + poetry install --with dev - name: Get .mypy_cache to speed up mypy uses: actions/cache@v4 @@ -85,7 +85,7 @@ jobs: - name: Install unit+integration test dependencies working-directory: ${{ inputs.working-directory }} run: | - poetry install --with test,test_integration + poetry install --with dev - name: Get .mypy_cache_test to speed up mypy uses: actions/cache@v4 @@ -96,7 +96,7 @@ jobs: ${{ env.WORKDIR }}/.mypy_cache_test key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }} - - name: Analysing the code with our lint - working-directory: ${{ inputs.working-directory }} - run: | - make lint_tests +# - name: Analysing the code with our lint +# working-directory: ${{ inputs.working-directory }} +# run: | +# make lint_tests diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 31063a4..a982dad 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -71,7 +71,7 @@ jobs: needs: - build uses: - ./.github/workflows/_test_release.yml + ./.github/workflows/_langchain-test_release.yml permissions: write-all with: working-directory: ${{ inputs.working-directory }} @@ -138,7 +138,7 @@ jobs: poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))" - name: Import test dependencies - run: poetry install --with test,test_integration + run: poetry install --with dev working-directory: ${{ inputs.working-directory }} # Overwrite the local version of the package with the test PyPI version. 
diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml index 69265bc..bc02880 100644 --- a/.github/workflows/_test.yml +++ b/.github/workflows/_test.yml @@ -36,7 +36,10 @@ jobs: - name: Install dependencies shell: bash - run: poetry install --with test + run: poetry install --with dev + + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.11.0 - name: Run core tests shell: bash diff --git a/.github/workflows/check_diffs.yml b/.github/workflows/ci.yml similarity index 83% rename from .github/workflows/check_diffs.yml rename to .github/workflows/ci.yml index b6f4891..a612ce7 100644 --- a/.github/workflows/check_diffs.yml +++ b/.github/workflows/ci.yml @@ -59,20 +59,9 @@ jobs: working-directory: ${{ matrix.working-directory }} secrets: inherit - compile-integration-tests: - name: cd ${{ matrix.working-directory }} - needs: [ build ] - if: ${{ needs.build.outputs.dirs-to-test != '[]' }} - strategy: - matrix: - working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }} - uses: ./.github/workflows/_compile_integration_test.yml - with: - working-directory: ${{ matrix.working-directory }} - secrets: inherit ci_success: name: "CI Success" - needs: [build, lint, test, compile-integration-tests] + needs: [build, lint, test] if: | always() runs-on: ubuntu-latest diff --git a/README.md b/README.md index bbb2989..834008b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ # 🦜️🔗 LangChain MongoDB -This repository contains 1 package with MongoDB integrations with LangChain: +This is a Monorepo containing partner packages of MongoDB and LangChainAI. +It includes integrations between MongoDB, Atlas, LangChain, and LangGraph. -- [langchain-mongodb](https://pypi.org/project/langchain-mongodb/) +It contains the following packages. 
+ +- `langchain-mongodb` ([PyPI](https://pypi.org/project/langchain-mongodb/)) +- `langgraph-checkpoint-mongodb` diff --git a/libs/langgraph-checkpoint-mongodb/Makefile b/libs/langgraph-checkpoint-mongodb/Makefile new file mode 100644 index 0000000..caf7243 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/Makefile @@ -0,0 +1,42 @@ +.PHONY: test lint format help + +###################### +# TESTING AND COVERAGE +###################### + +test tests: + poetry run pytest tests + +###################### +# LINTING AND FORMATTING +###################### + +# Define a variable for Python and notebook files. +PYTHON_FILES=. +MYPY_CACHE=.mypy_cache +lint format: PYTHON_FILES=. +lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --relative --diff-filter=d main . | grep -E '\.py$$|\.ipynb$$') +lint_package: PYTHON_FILES=langgraph +lint_tests: PYTHON_FILES=tests +lint_tests: MYPY_CACHE=.mypy_cache_test + +lint lint_diff lint_package lint_tests: + poetry run ruff check . + [ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff + [ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I $(PYTHON_FILES) + [ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) + [ "$(PYTHON_FILES)" = "" ] || poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE) + +format format_diff: + poetry run ruff format $(PYTHON_FILES) + poetry run ruff check --select I --fix $(PYTHON_FILES) + +###################### +# HELP +###################### + +help: + @echo '----' + @echo 'format - run code formatters' + @echo 'lint - run linters' + @echo 'test - run unit tests' diff --git a/libs/langgraph-checkpoint-mongodb/README.md b/libs/langgraph-checkpoint-mongodb/README.md new file mode 100644 index 0000000..bab3912 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/README.md @@ -0,0 +1,95 @@ +# LangGraph Checkpoint MongoDB + +Implementation of LangGraph CheckpointSaver that uses MongoDB. 
+ +## Usage + +```python +from langgraph.checkpoint.mongodb import MongoDBSaver + +write_config = {"configurable": {"thread_id": "1", "checkpoint_ns": ""}} +read_config = {"configurable": {"thread_id": "1"}} + +MONGODB_URI = "mongodb://localhost:27017" +DB_NAME = "checkpoint_example" + +with MongoDBSaver.from_conn_string(MONGODB_URI, DB_NAME) as checkpointer: + # call .setup() the first time you're using the checkpointer + checkpointer.setup() + checkpoint = { + "v": 1, + "ts": "2024-07-31T20:14:19.804150+00:00", + "id": "1ef4f797-8335-6428-8001-8a1503f9b875", + "channel_values": { + "my_key": "meow", + "node": "node" + }, + "channel_versions": { + "__start__": 2, + "my_key": 3, + "start:node": 3, + "node": 3 + }, + "versions_seen": { + "__input__": {}, + "__start__": { + "__start__": 1 + }, + "node": { + "start:node": 2 + } + }, + "pending_sends": [], + } + + # store checkpoint + checkpointer.put(write_config, checkpoint, {}, {}) + + # load checkpoint + checkpointer.get(read_config) + + # list checkpoints + list(checkpointer.list(read_config)) +``` + +### Async + +```python +from langgraph.checkpoint.mongodb.aio import AsyncMongoDBSaver + +async with AsyncMongoDBSaver.from_conn_string(MONGODB_URI) as checkpointer: + checkpoint = { + "v": 1, + "ts": "2024-07-31T20:14:19.804150+00:00", + "id": "1ef4f797-8335-6428-8001-8a1503f9b875", + "channel_values": { + "my_key": "meow", + "node": "node" + }, + "channel_versions": { + "__start__": 2, + "my_key": 3, + "start:node": 3, + "node": 3 + }, + "versions_seen": { + "__input__": {}, + "__start__": { + "__start__": 1 + }, + "node": { + "start:node": 2 + } + }, + "pending_sends": [], + } + + # store checkpoint + await checkpointer.aput(write_config, checkpoint, {}, {}) + + # load checkpoint + await checkpointer.aget(read_config) + + # list checkpoints + [c async for c in checkpointer.alist(read_config)] +``` diff --git a/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/__init__.py
b/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/__init__.py new file mode 100644 index 0000000..2f18e9e --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/__init__.py @@ -0,0 +1,376 @@ +from contextlib import contextmanager +from typing import ( + Any, + Dict, + Iterator, + Optional, + Sequence, + Tuple, +) + +from langchain_core.runnables import RunnableConfig +from pymongo import MongoClient, UpdateOne +from pymongo.database import Database as MongoDatabase + +from langgraph.checkpoint.base import ( + WRITES_IDX_MAP, + BaseCheckpointSaver, + ChannelVersions, + Checkpoint, + CheckpointMetadata, + CheckpointTuple, + get_checkpoint_id, +) + +from .utils import dumps_metadata, loads_metadata + + +class MongoDBSaver(BaseCheckpointSaver): + """A checkpoint saver that stores StateGraph checkpoints in a MongoDB database. + + Args: + client (MongoClient): The MongoDB connection. + db_name (Optional[str]): Database name + checkpoint_collection_name (Optional[str]): Name of Collection of Checkpoints + writes_collection_name (Optional[str]): Name of Collection of intermediate writes. 
+ + Examples: + + >>> from langgraph.checkpoint.mongodb import MongoDBSaver + >>> from langgraph.graph import StateGraph + >>> from pymongo import MongoClient + >>> + >>> builder = StateGraph(int) + >>> builder.add_node("add_one", lambda x: x + 1) + >>> builder.set_entry_point("add_one") + >>> builder.set_finish_point("add_one") + >>> client = MongoClient("mongodb://localhost:27017") + >>> memory = MongoDBSaver(client) + >>> graph = builder.compile(checkpointer=memory) + >>> config = {"configurable": {"thread_id": "1"}} + >>> graph.get_state(config) + >>> result = graph.invoke(3, config) + >>> graph.get_state(config) + StateSnapshot(values=4, next=(), config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef8b22d-df71-6ddc-8001-7c821b5c45fd'}}, metadata={'source': 'loop', 'writes': {'add_one': 4}, 'step': 1, 'parents': {}}, created_at='2024-10-15T18:25:34.088329+00:00', parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef8b22d-df6f-6eec-8000-20f621dcf3b7'}}, tasks=()) + """ + + client: MongoClient + db: MongoDatabase + + def __init__( + self, + client: MongoClient, + db_name: str = "checkpointing_db", + checkpoint_collection_name: str = "checkpoints", + writes_collection_name: str = "checkpoint_writes", + **kwargs: Any, + ) -> None: + super().__init__() + self.client = client + self.db = self.client[db_name] + self.checkpoint_collection = self.db[checkpoint_collection_name] + self.writes_collection = self.db[writes_collection_name] + + @classmethod + @contextmanager + def from_conn_string( + cls, + conn_string: Optional[str] = None, + db_name: str = "checkpointing_db", + checkpoint_collection_name: str = "checkpoints", + writes_collection_name: str = "checkpoint_writes", + **kwargs: Any, + ) -> Iterator["MongoDBSaver"]: + """Context manager to create a MongoDB checkpoint saver. + Args: + conn_string: MongoDB connection string. See [class:~pymongo.MongoClient]. + db_name: Database name. 
It will be created if it doesn't exist. + checkpoint_collection_name: Checkpoint Collection name. Created if it doesn't exist. + writes_collection_name: Collection name of intermediate writes. Created if it doesn't exist. + Yields: A new MongoDBSaver. + """ + client: Optional[MongoClient] = None + try: + client = MongoClient(conn_string) + yield MongoDBSaver( + client, + db_name, + checkpoint_collection_name, + writes_collection_name, + **kwargs, + ) + finally: + if client: + client.close() + + def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + """Get a checkpoint tuple from the database. + + This method retrieves a checkpoint tuple from the MongoDB database based on the + provided config. If the config contains a "checkpoint_id" key, the checkpoint with + the matching thread ID and checkpoint ID is retrieved. Otherwise, the latest checkpoint + for the given thread ID is retrieved. + + Args: + config (RunnableConfig): The config to use for retrieving the checkpoint. + + Returns: + Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found. + + Examples: + + Basic: + >>> config = {"configurable": {"thread_id": "1"}} + >>> checkpoint_tuple = memory.get_tuple(config) + >>> print(checkpoint_tuple) + CheckpointTuple(...) + + With checkpoint ID: + >>> config = { + ... "configurable": { + ... "thread_id": "1", + ... "checkpoint_ns": "", + ... "checkpoint_id": "1ef4f797-8335-6428-8001-8a1503f9b875", + ... } + ... } + >>> checkpoint_tuple = memory.get_tuple(config) + >>> print(checkpoint_tuple) + CheckpointTuple(...) 
+ """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"].get("checkpoint_ns", "") + if checkpoint_id := get_checkpoint_id(config): + query = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + else: + query = {"thread_id": thread_id, "checkpoint_ns": checkpoint_ns} + + result = self.checkpoint_collection.find( + query, sort=[("checkpoint_id", -1)], limit=1 + ) + for doc in result: + config_values = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": doc["checkpoint_id"], + } + checkpoint = self.serde.loads_typed((doc["type"], doc["checkpoint"])) + serialized_writes = self.writes_collection.find(config_values) + pending_writes = [ + ( + doc["task_id"], + doc["channel"], + self.serde.loads_typed((doc["type"], doc["value"])), + ) + for doc in serialized_writes + ] + return CheckpointTuple( + {"configurable": config_values}, + checkpoint, + loads_metadata(doc["metadata"]), + ( + { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": doc["parent_checkpoint_id"], + } + } + if doc.get("parent_checkpoint_id") + else None + ), + pending_writes, + ) + + def list( + self, + config: Optional[RunnableConfig], + *, + filter: Optional[Dict[str, Any]] = None, + before: Optional[RunnableConfig] = None, + limit: Optional[int] = None, + ) -> Iterator[CheckpointTuple]: + """List checkpoints from the database. + + This method retrieves a list of checkpoint tuples from the MongoDB database based + on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first). + + Args: + config (RunnableConfig): The config to use for listing the checkpoints. + filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. Defaults to None. + before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None. 
+ limit (Optional[int]): The maximum number of checkpoints to return. Defaults to None. + + Yields: + Iterator[CheckpointTuple]: An iterator of checkpoint tuples. + + Examples: + >>> from langgraph.checkpoint.mongodb import MongoDBSaver + >>> with MongoDBSaver.from_conn_string("mongodb://localhost:27017") as memory: + ... # Run a graph, then list the checkpoints + >>> config = {"configurable": {"thread_id": "1"}} + >>> checkpoints = list(memory.list(config, limit=2)) + >>> print(checkpoints) + [CheckpointTuple(...), CheckpointTuple(...)] + """ + query = {} + if config is not None: + if "thread_id" in config["configurable"]: + query["thread_id"] = config["configurable"]["thread_id"] + if "checkpoint_ns" in config["configurable"]: + query["checkpoint_ns"] = config["configurable"]["checkpoint_ns"] + + if filter: + for key, value in filter.items(): + query[f"metadata.{key}"] = dumps_metadata(value) + + if before is not None: + query["checkpoint_id"] = {"$lt": before["configurable"]["checkpoint_id"]} + + result = self.checkpoint_collection.find( + query, limit=0 if limit is None else limit, sort=[("checkpoint_id", -1)] + ) + + for doc in result: + config_values = { + "thread_id": doc["thread_id"], + "checkpoint_ns": doc["checkpoint_ns"], + "checkpoint_id": doc["checkpoint_id"], + } + serialized_writes = self.writes_collection.find(config_values) + pending_writes = [ + ( + wrt["task_id"], + wrt["channel"], + self.serde.loads_typed((wrt["type"], wrt["value"])), + ) + for wrt in serialized_writes + ] + + yield CheckpointTuple( + config={ + "configurable": { + "thread_id": doc["thread_id"], + "checkpoint_ns": doc["checkpoint_ns"], + "checkpoint_id": doc["checkpoint_id"], + } + }, + checkpoint=self.serde.loads_typed((doc["type"], doc["checkpoint"])), + metadata=loads_metadata(doc["metadata"]), + parent_config=( + { + "configurable": { + "thread_id": doc["thread_id"], + "checkpoint_ns": doc["checkpoint_ns"], + "checkpoint_id": doc["parent_checkpoint_id"], + } + } + if 
doc.get("parent_checkpoint_id") + else None + ), + pending_writes=pending_writes, + ) + + def put( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + """Save a checkpoint to the database. + + This method saves a checkpoint to the MongoDB database. The checkpoint is associated + with the provided config and its parent config (if any). + + Args: + config (RunnableConfig): The config to associate with the checkpoint. + checkpoint (Checkpoint): The checkpoint to save. + metadata (CheckpointMetadata): Additional metadata to save with the checkpoint. + new_versions (ChannelVersions): New channel versions as of this write. + + Returns: + RunnableConfig: Updated configuration after storing the checkpoint. + + Examples: + + >>> from langgraph.checkpoint.mongodb import MongoDBSaver + >>> with MongoDBSaver.from_conn_string("mongodb://localhost:27017") as memory: + >>> config = {"configurable": {"thread_id": "1", "checkpoint_ns": ""}} + >>> checkpoint = {"ts": "2024-05-04T06:32:42.235444+00:00", "id": "1ef4f797-8335-6428-8001-8a1503f9b875", "data": {"key": "value"}} + >>> saved_config = memory.put(config, checkpoint, {"source": "input", "step": 1, "writes": {"key": "value"}}, {}) + >>> print(saved_config) + {'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef4f797-8335-6428-8001-8a1503f9b875'}} + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"]["checkpoint_ns"] + checkpoint_id = checkpoint["id"] + type_, serialized_checkpoint = self.serde.dumps_typed(checkpoint) + doc = { + "parent_checkpoint_id": config["configurable"].get("checkpoint_id"), + "type": type_, + "checkpoint": serialized_checkpoint, + "metadata": dumps_metadata(metadata), + } + upsert_query = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + self.checkpoint_collection.update_one(upsert_query, 
{"$set": doc}, upsert=True) + return { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + } + + def put_writes( + self, + config: RunnableConfig, + writes: Sequence[Tuple[str, Any]], + task_id: str, + ) -> None: + """Store intermediate writes linked to a checkpoint. + + This method saves intermediate writes associated with a checkpoint to the MongoDB database. + + Args: + config (RunnableConfig): Configuration of the related checkpoint. + writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair. + task_id (str): Identifier for the task creating the writes. + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"]["checkpoint_ns"] + checkpoint_id = config["configurable"]["checkpoint_id"] + set_method = ( # Allow replacement on existing writes only if there were errors. + "$set" if all(w[0] in WRITES_IDX_MAP for w in writes) else "$setOnInsert" + ) + operations = [] + for idx, (channel, value) in enumerate(writes): + upsert_query = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + "task_id": task_id, + "idx": WRITES_IDX_MAP.get(channel, idx), + } + type_, serialized_value = self.serde.dumps_typed(value) + operations.append( + UpdateOne( + upsert_query, + { + set_method: { + "channel": channel, + "type": type_, + "value": serialized_value, + } + }, + upsert=True, + ) + ) + self.writes_collection.bulk_write(operations) diff --git a/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/aio.py b/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/aio.py new file mode 100644 index 0000000..653bdb1 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/aio.py @@ -0,0 +1,456 @@ +import asyncio +import builtins +import sys +from contextlib import asynccontextmanager +from typing import ( + Any, + AsyncIterator, + Dict, + Iterator, + 
Optional, + Sequence, + Tuple, +) + +from langchain_core.runnables import RunnableConfig +from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase +from pymongo import UpdateOne + +from langgraph.checkpoint.base import ( + WRITES_IDX_MAP, + BaseCheckpointSaver, + ChannelVersions, + Checkpoint, + CheckpointMetadata, + CheckpointTuple, + get_checkpoint_id, +) + +from .utils import dumps_metadata, loads_metadata + +if sys.version_info >= (3, 10): + anext = builtins.anext + aiter = builtins.aiter +else: + + async def anext(cls: Any) -> Any: + """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#anext.""" + return await cls.__anext__() + + def aiter(cls: Any) -> Any: + """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#aiter.""" + return cls.__aiter__() + + +class AsyncMongoDBSaver(BaseCheckpointSaver): + """A checkpoint saver that stores checkpoints in a MongoDB database asynchronously. + + The synchronous MongoDBSaver has extended documentation, but + Asynchronous usage is shown below.
+ + Examples: + >>> import asyncio + >>> from langgraph.checkpoint.mongodb.aio import AsyncMongoDBSaver + >>> from langgraph.graph import StateGraph + + >>> async def main(): + >>> builder = StateGraph(int) + >>> builder.add_node("add_one", lambda x: x + 1) + >>> builder.set_entry_point("add_one") + >>> builder.set_finish_point("add_one") + >>> async with AsyncMongoDBSaver.from_conn_string("mongodb://localhost:27017") as memory: + >>> graph = builder.compile(checkpointer=memory) + >>> config = {"configurable": {"thread_id": "1"}} + >>> input = 3 + >>> output = await graph.ainvoke(input, config) + >>> print(f"{input=}, {output=}") + + >>> if __name__ == "__main__": + >>> asyncio.run(main()) + input=3, output=4 + """ + + client: AsyncIOMotorClient + db: AsyncIOMotorDatabase + + def __init__( + self, + client: AsyncIOMotorClient, + db_name: str = "checkpointing_db", + checkpoint_collection_name: str = "checkpoints_aio", + writes_collection_name: str = "checkpoint_writes_aio", + **kwargs: Any, + ) -> None: + super().__init__() + self.client = client + self.db = self.client[db_name] + self.checkpoint_collection = self.db[checkpoint_collection_name] + self.writes_collection = self.db[writes_collection_name] + self.loop = asyncio.get_running_loop() + + @classmethod + @asynccontextmanager + async def from_conn_string( + cls, + conn_string: str, + db_name: str = "checkpointing_db", + checkpoint_collection_name: str = "checkpoints_aio", + writes_collection_name: str = "checkpoint_writes_aio", + **kwargs: Any, + ) -> AsyncIterator["AsyncMongoDBSaver"]: + client: Optional[AsyncIOMotorClient] = None + try: + client = AsyncIOMotorClient(conn_string) + yield AsyncMongoDBSaver( + client, + db_name, + checkpoint_collection_name, + writes_collection_name, + **kwargs, + ) + finally: + if client: + client.close() + + async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + """Get a checkpoint tuple from the database asynchronously. 
+ + This method retrieves a checkpoint tuple from the MongoDB database based on the + provided config. If the config contains a "checkpoint_id" key, the checkpoint with + the matching thread ID and checkpoint ID is retrieved. Otherwise, the latest checkpoint + for the given thread ID is retrieved. + + Args: + config (RunnableConfig): The config to use for retrieving the checkpoint. + + Returns: + Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found. + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"].get("checkpoint_ns", "") + if checkpoint_id := get_checkpoint_id(config): + query = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + else: + query = {"thread_id": thread_id, "checkpoint_ns": checkpoint_ns} + + result = self.checkpoint_collection.find( + query, sort=[("checkpoint_id", -1)], limit=1 + ) + async for doc in result: + config_values = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": doc["checkpoint_id"], + } + checkpoint = self.serde.loads_typed((doc["type"], doc["checkpoint"])) + serialized_writes = self.writes_collection.find(config_values) + pending_writes = [ + ( + wrt["task_id"], + wrt["channel"], + self.serde.loads_typed((wrt["type"], wrt["value"])), + ) + async for wrt in serialized_writes + ] + return CheckpointTuple( + {"configurable": config_values}, + checkpoint, + loads_metadata(doc["metadata"]), + ( + { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": doc["parent_checkpoint_id"], + } + } + if doc.get("parent_checkpoint_id") + else None + ), + pending_writes, + ) + + async def alist( + self, + config: Optional[RunnableConfig], + *, + filter: Optional[Dict[str, Any]] = None, + before: Optional[RunnableConfig] = None, + limit: Optional[int] = None, + ) -> AsyncIterator[CheckpointTuple]: + """List checkpoints from the 
database asynchronously. + + This method retrieves a list of checkpoint tuples from the MongoDB database based + on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first). + + Args: + config (Optional[RunnableConfig]): Base configuration for filtering checkpoints. + filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. + before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None. + limit (Optional[int]): Maximum number of checkpoints to return. + + Yields: + AsyncIterator[CheckpointTuple]: An asynchronous iterator of matching checkpoint tuples. + """ + query = {} + if config is not None: + if "thread_id" in config["configurable"]: + query["thread_id"] = config["configurable"]["thread_id"] + if "checkpoint_ns" in config["configurable"]: + query["checkpoint_ns"] = config["configurable"]["checkpoint_ns"] + + if filter: + for key, value in filter.items(): + query[f"metadata.{key}"] = dumps_metadata(value) + + if before is not None: + query["checkpoint_id"] = {"$lt": before["configurable"]["checkpoint_id"]} + + result = self.checkpoint_collection.find( + query, limit=0 if limit is None else limit, sort=[("checkpoint_id", -1)] + ) + + async for doc in result: + config_values = { + "thread_id": doc["thread_id"], + "checkpoint_ns": doc["checkpoint_ns"], + "checkpoint_id": doc["checkpoint_id"], + } + serialized_writes = self.writes_collection.find(config_values) + pending_writes = [ + ( + wrt["task_id"], + wrt["channel"], + self.serde.loads_typed((wrt["type"], wrt["value"])), + ) + async for wrt in serialized_writes + ] + + yield CheckpointTuple( + config={ + "configurable": { + "thread_id": doc["thread_id"], + "checkpoint_ns": doc["checkpoint_ns"], + "checkpoint_id": doc["checkpoint_id"], + } + }, + checkpoint=self.serde.loads_typed((doc["type"], doc["checkpoint"])), + metadata=loads_metadata(doc["metadata"]), + parent_config=( + { 
+ "configurable": { + "thread_id": doc["thread_id"], + "checkpoint_ns": doc["checkpoint_ns"], + "checkpoint_id": doc["parent_checkpoint_id"], + } + } + if doc.get("parent_checkpoint_id") + else None + ), + pending_writes=pending_writes, + ) + + async def aput( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + """Save a checkpoint to the database asynchronously. + + This method saves a checkpoint to the MongoDB database. The checkpoint is associated + with the provided config and its parent config (if any). + + Args: + config (RunnableConfig): The config to associate with the checkpoint. + checkpoint (Checkpoint): The checkpoint to save. + metadata (CheckpointMetadata): Additional metadata to save with the checkpoint. + new_versions (ChannelVersions): New channel versions as of this write. + + Returns: + RunnableConfig: Updated configuration after storing the checkpoint. + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"]["checkpoint_ns"] + checkpoint_id = checkpoint["id"] + type_, serialized_checkpoint = self.serde.dumps_typed(checkpoint) + doc = { + "parent_checkpoint_id": config["configurable"].get("checkpoint_id"), + "type": type_, + "checkpoint": serialized_checkpoint, + "metadata": dumps_metadata(metadata), + } + upsert_query = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + # Upsert keyed on (thread_id, checkpoint_ns, checkpoint_id) so re-saving is idempotent. + await self.checkpoint_collection.update_one( + upsert_query, {"$set": doc}, upsert=True + ) + return { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + } + + async def aput_writes( + self, + config: RunnableConfig, + writes: Sequence[Tuple[str, Any]], + task_id: str, + ) -> None: + """Store intermediate writes linked to a checkpoint asynchronously.
+ + This method saves intermediate writes associated with a checkpoint to the database. + + Args: + config (RunnableConfig): Configuration of the related checkpoint. + writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair. + task_id (str): Identifier for the task creating the writes. + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"]["checkpoint_ns"] + checkpoint_id = config["configurable"]["checkpoint_id"] + set_method = ( # Allow replacement on existing writes only if there were errors. + "$set" if all(w[0] in WRITES_IDX_MAP for w in writes) else "$setOnInsert" + ) + operations = [] + for idx, (channel, value) in enumerate(writes): + upsert_query = { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + "task_id": task_id, + "idx": WRITES_IDX_MAP.get(channel, idx), + } + type_, serialized_value = self.serde.dumps_typed(value) + operations.append( + UpdateOne( + upsert_query, + { + set_method: { + "channel": channel, + "type": type_, + "value": serialized_value, + } + }, + upsert=True, + ) + ) + await self.writes_collection.bulk_write(operations) + + def list( + self, + config: Optional[RunnableConfig], + *, + filter: Optional[dict[str, Any]] = None, + before: Optional[RunnableConfig] = None, + limit: Optional[int] = None, + ) -> Iterator[CheckpointTuple]: + """List checkpoints from the database. + + This method retrieves a list of checkpoint tuples from the MongoDB database + based on the provided config. The checkpoints are ordered by checkpoint ID in + descending order (newest first). + + Args: + config (Optional[RunnableConfig]): Base configuration for filtering checkpoints. + filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. + before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None. 
+ limit (Optional[int]): Maximum number of checkpoints to return. + + Yields: + Iterator[CheckpointTuple]: An iterator of matching checkpoint tuples. + """ + aiter_ = self.alist(config, filter=filter, before=before, limit=limit) + while True: + try: + yield asyncio.run_coroutine_threadsafe( + anext(aiter_), + self.loop, + ).result() + except StopAsyncIteration: + break + + def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + """Get a checkpoint tuple from the database. + + This method retrieves a checkpoint tuple from the MongoDB database based on + the provided config. If the config contains a "checkpoint_id" key, the + checkpoint with the matching thread ID and "checkpoint_id" is retrieved. + Otherwise, the latest checkpoint for the given thread ID is retrieved. + + Args: + config (RunnableConfig): The config to use for retrieving the checkpoint. + + Returns: + Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found. + """ + try: + # check if we are in the main thread, only bg threads can block + # we don't check in other methods to avoid the overhead + if asyncio.get_running_loop() is self.loop: + raise asyncio.InvalidStateError( + "Synchronous calls to AsyncMongoDBSaver are only allowed from a " + "different thread. From the main thread, use the async interface." + "For example, use `await checkpointer.aget_tuple(...)` or `await " + "graph.ainvoke(...)`." + ) + except RuntimeError: + pass + return asyncio.run_coroutine_threadsafe( + self.aget_tuple(config), self.loop + ).result() + + def put( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + """Save a checkpoint to the database. + + This method saves a checkpoint to the MongoDB database. The checkpoint + is associated with the provided config and its parent config (if any). 
+ + Args: + config (RunnableConfig): The config to associate with the checkpoint. + checkpoint (Checkpoint): The checkpoint to save. + metadata (CheckpointMetadata): Additional metadata to save with the checkpoint. + new_versions (ChannelVersions): New channel versions as of this write. + + Returns: + RunnableConfig: Updated configuration after storing the checkpoint. + """ + return asyncio.run_coroutine_threadsafe( + self.aput(config, checkpoint, metadata, new_versions), self.loop + ).result() + + def put_writes( + self, + config: RunnableConfig, + writes: Sequence[tuple[str, Any]], + task_id: str, + ) -> None: + """Store intermediate writes linked to a checkpoint. + + This method saves intermediate writes associated with a checkpoint to the database. + + Args: + config (RunnableConfig): Configuration of the related checkpoint. + writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair. + task_id (str): Identifier for the task creating the writes. + """ + return asyncio.run_coroutine_threadsafe( + self.aput_writes(config, writes, task_id), self.loop + ).result() diff --git a/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/utils.py b/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/utils.py new file mode 100644 index 0000000..e94d310 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/langgraph/checkpoint/mongodb/utils.py @@ -0,0 +1,42 @@ +from typing import Any, Dict, Union + +from langgraph.checkpoint.base import CheckpointMetadata +from langgraph.checkpoint.serde.base import SerializerProtocol +from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer + +serde: SerializerProtocol = JsonPlusSerializer() + + +def loads_metadata(metadata: dict[str, Any]) -> CheckpointMetadata: + """Deserialize metadata document + + The CheckpointMetadata class itself cannot be stored directly in MongoDB, + but as a dictionary it can. For efficient filtering in MongoDB, + we keep dict keys as strings. 
+ + metadata is stored in MongoDB collection with string keys and + serde serialized keys. + """ + if isinstance(metadata, dict): + output = dict() + for key, value in metadata.items(): + output[key] = loads_metadata(value) + return output + else: + return serde.loads(metadata) + + +def dumps_metadata( + metadata: Union[CheckpointMetadata, Any], +) -> Union[bytes, Dict[str, Any]]: + """Serialize all values in metadata dictionary. + + Keep dict keys as strings for efficient filtering in MongoDB + """ + if isinstance(metadata, dict): + output = dict() + for key, value in metadata.items(): + output[key] = dumps_metadata(value) + return output + else: + return serde.dumps(metadata) diff --git a/libs/langgraph-checkpoint-mongodb/poetry.lock b/libs/langgraph-checkpoint-mongodb/poetry.lock new file mode 100644 index 0000000..c460388 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/poetry.lock @@ -0,0 +1,1259 @@ +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
+ +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.6.2.post1" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "certifi" +version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = 
"charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash 
= "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + 
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = 
"sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, +] + +[[package]] +name = "codespell" +version = "2.3.0" +description = "Codespell" +optional = false +python-versions = ">=3.8" +files = [ + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, +] + +[package.extras] +dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] +hard-encoding-detection = ["chardet"] +toml = ["tomli"] +types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +description = "DNS toolkit" +optional = false +python-versions = ">=3.9" +files = [ + {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, + {file = "dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.16.0)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "quart-trio (>=0.11.0)", "sphinx (>=7.2.0)", "sphinx-rtd-theme (>=2.0.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=43)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx 
(>=0.26.0)"] +doq = ["aioquic (>=1.0.0)"] +idna = ["idna (>=3.7)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "docopt" +version = "0.6.2" +description = "Pythonic argument parser, that will make you smile" +optional = false +python-versions = "*" +files = [ + {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +description = "A minimal low-level HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "httpx-sse" +version = "0.4.0" +description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, + {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, +] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +description = "Apply JSON-Patches (RFC 6902)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, +] + +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +name = "jsonpointer" +version = "3.0.0" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", 
hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, + {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, +] + +[[package]] +name = "langchain-core" +version = "0.3.19" +description = "Building applications with LLMs through composability" +optional = false +python-versions = ">=3.9,<4.0" +files = [] +develop = false + +[package.dependencies] +jsonpatch = "^1.33" +langsmith = "^0.1.125" +packaging = ">=23.2,<25" +pydantic = [ + {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""}, + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, +] +PyYAML = ">=5.3" +tenacity = ">=8.1.0,!=8.4.0,<10.0.0" +typing-extensions = ">=4.7" + +[package.source] +type = "git" +url = "https://github.com/langchain-ai/langchain.git" +reference = "HEAD" +resolved_reference = "f173b72e35979b842933774c9c4568c329a0ae8a" +subdirectory = "libs/core" + +[[package]] +name = "langgraph" +version = "0.2.53" +description = "Building stateful, multi-actor applications with LLMs" +optional = false +python-versions = ">=3.9.0,<4.0" +files = [] +develop = false + +[package.dependencies] +langchain-core = ">=0.2.43,<0.4.0,!=0.3.0,!=0.3.1,!=0.3.2,!=0.3.3,!=0.3.4,!=0.3.5,!=0.3.6,!=0.3.7,!=0.3.8,!=0.3.9,!=0.3.10,!=0.3.11,!=0.3.12,!=0.3.13,!=0.3.14" +langgraph-checkpoint = "^2.0.4" +langgraph-sdk = "^0.1.32" + +[package.source] +type = "git" +url = "https://github.com/langchain-ai/langgraph.git" +reference = "HEAD" +resolved_reference = "3eedeac0d4d2cdce5fdaf5431083ffe4635502f0" +subdirectory = "libs/langgraph" + +[[package]] +name = "langgraph-checkpoint" +version = "2.0.5" +description = "Library with base interfaces for LangGraph checkpoint savers." 
+optional = false +python-versions = "^3.9.0,<4.0" +files = [] +develop = false + +[package.dependencies] +langchain-core = ">=0.2.38,<0.4" +msgpack = "^1.1.0" + +[package.source] +type = "git" +url = "https://github.com/langchain-ai/langgraph.git" +reference = "HEAD" +resolved_reference = "3eedeac0d4d2cdce5fdaf5431083ffe4635502f0" +subdirectory = "libs/checkpoint" + +[[package]] +name = "langgraph-sdk" +version = "0.1.36" +description = "SDK for interacting with LangGraph API" +optional = false +python-versions = "<4.0.0,>=3.9.0" +files = [ + {file = "langgraph_sdk-0.1.36-py3-none-any.whl", hash = "sha256:b11e1f0bc67631134d09d50c812dc73f9eb30394764ae1144d7d2a786a715355"}, + {file = "langgraph_sdk-0.1.36.tar.gz", hash = "sha256:2a2c651b7851ba15aeaab7e4e3ea7fd8357ef1cb0b592f264916fa990cdda6e7"}, +] + +[package.dependencies] +httpx = ">=0.25.2" +httpx-sse = ">=0.4.0" +orjson = ">=3.10.1" + +[[package]] +name = "langsmith" +version = "0.1.144" +description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
+optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langsmith-0.1.144-py3-none-any.whl", hash = "sha256:08ffb975bff2e82fc6f5428837c64c074ea25102d08a25e256361a80812c6100"}, + {file = "langsmith-0.1.144.tar.gz", hash = "sha256:b621f358d5a33441d7b5e7264c376bf4ea82bfc62d7e41aafc0f8094e3bd6369"}, +] + +[package.dependencies] +httpx = ">=0.23.0,<1" +orjson = {version = ">=3.9.14,<4.0.0", markers = "platform_python_implementation != \"PyPy\""} +pydantic = [ + {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, +] +requests = ">=2,<3" +requests-toolbelt = ">=1.0.0,<2.0.0" + +[[package]] +name = "motor" +version = "3.6.0" +description = "Non-blocking MongoDB driver for Tornado or asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "motor-3.6.0-py3-none-any.whl", hash = "sha256:9f07ed96f1754963d4386944e1b52d403a5350c687edc60da487d66f98dbf894"}, + {file = "motor-3.6.0.tar.gz", hash = "sha256:0ef7f520213e852bf0eac306adf631aabe849227d8aec900a2612512fb9c5b8d"}, +] + +[package.dependencies] +pymongo = ">=4.9,<4.10" + +[package.extras] +aws = ["pymongo[aws] (>=4.5,<5)"] +docs = ["aiohttp", "furo (==2024.8.6)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-rtd-theme (>=2,<3)", "tornado"] +encryption = ["pymongo[encryption] (>=4.5,<5)"] +gssapi = ["pymongo[gssapi] (>=4.5,<5)"] +ocsp = ["pymongo[ocsp] (>=4.5,<5)"] +snappy = ["pymongo[snappy] (>=4.5,<5)"] +test = ["aiohttp (>=3.8.7)", "cffi (>=1.17.0rc1)", "mockupdb", "pymongo[encryption] (>=4.5,<5)", "pytest (>=7)", "pytest-asyncio", "tornado (>=5)"] +zstd = ["pymongo[zstd] (>=4.5,<5)"] + +[[package]] +name = "msgpack" +version = "1.1.0" +description = "MessagePack serializer" +optional = false +python-versions = ">=3.8" +files = [ + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, + {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, + {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, + {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, + {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = 
"msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, + {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, + {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, + {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, + {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, + {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, + {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, +] + +[[package]] +name = "mypy" +version = "1.13.0" +description = "Optional static typing for Python" +optional = false +python-versions = 
">=3.8" +files = [ + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, 
+ {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "orjson" +version = "3.10.11" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.11-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6dade64687f2bd7c090281652fe18f1151292d567a9302b34c2dbb92a3872f1f"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82f07c550a6ccd2b9290849b22316a609023ed851a87ea888c0456485a7d196a"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd9a187742d3ead9df2e49240234d728c67c356516cf4db018833a86f20ec18c"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77b0fed6f209d76c1c39f032a70df2d7acf24b1812ca3e6078fd04e8972685a3"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63fc9d5fe1d4e8868f6aae547a7b8ba0a2e592929245fff61d633f4caccdcdd6"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65cd3e3bb4fbb4eddc3c1e8dce10dc0b73e808fcb875f9fab40c81903dd9323e"}, + {file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f67c570602300c4befbda12d153113b8974a3340fdcf3d6de095ede86c06d92"}, + {file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1f39728c7f7d766f1f5a769ce4d54b5aaa4c3f92d5b84817053cc9995b977acc"}, + {file = "orjson-3.10.11-cp310-none-win32.whl", hash = "sha256:1789d9db7968d805f3d94aae2c25d04014aae3a2fa65b1443117cd462c6da647"}, + 
{file = "orjson-3.10.11-cp310-none-win_amd64.whl", hash = "sha256:5576b1e5a53a5ba8f8df81872bb0878a112b3ebb1d392155f00f54dd86c83ff6"}, + {file = "orjson-3.10.11-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1444f9cb7c14055d595de1036f74ecd6ce15f04a715e73f33bb6326c9cef01b6"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdec57fe3b4bdebcc08a946db3365630332dbe575125ff3d80a3272ebd0ddafe"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eed32f33a0ea6ef36ccc1d37f8d17f28a1d6e8eefae5928f76aff8f1df85e67"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80df27dd8697242b904f4ea54820e2d98d3f51f91e97e358fc13359721233e4b"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:705f03cee0cb797256d54de6695ef219e5bc8c8120b6654dd460848d57a9af3d"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03246774131701de8e7059b2e382597da43144a9a7400f178b2a32feafc54bd5"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8b5759063a6c940a69c728ea70d7c33583991c6982915a839c8da5f957e0103a"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:677f23e32491520eebb19c99bb34675daf5410c449c13416f7f0d93e2cf5f981"}, + {file = "orjson-3.10.11-cp311-none-win32.whl", hash = "sha256:a11225d7b30468dcb099498296ffac36b4673a8398ca30fdaec1e6c20df6aa55"}, + {file = "orjson-3.10.11-cp311-none-win_amd64.whl", hash = "sha256:df8c677df2f9f385fcc85ab859704045fa88d4668bc9991a527c86e710392bec"}, + {file = "orjson-3.10.11-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:360a4e2c0943da7c21505e47cf6bd725588962ff1d739b99b14e2f7f3545ba51"}, + {file = 
"orjson-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:496e2cb45de21c369079ef2d662670a4892c81573bcc143c4205cae98282ba97"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7dfa8db55c9792d53c5952900c6a919cfa377b4f4534c7a786484a6a4a350c19"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51f3382415747e0dbda9dade6f1e1a01a9d37f630d8c9049a8ed0e385b7a90c0"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f35a1b9f50a219f470e0e497ca30b285c9f34948d3c8160d5ad3a755d9299433"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f3b7c5803138e67028dde33450e054c87e0703afbe730c105f1fcd873496d5"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f91d9eb554310472bd09f5347950b24442600594c2edc1421403d7610a0998fd"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dfbb2d460a855c9744bbc8e36f9c3a997c4b27d842f3d5559ed54326e6911f9b"}, + {file = "orjson-3.10.11-cp312-none-win32.whl", hash = "sha256:d4a62c49c506d4d73f59514986cadebb7e8d186ad510c518f439176cf8d5359d"}, + {file = "orjson-3.10.11-cp312-none-win_amd64.whl", hash = "sha256:f1eec3421a558ff7a9b010a6c7effcfa0ade65327a71bb9b02a1c3b77a247284"}, + {file = "orjson-3.10.11-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c46294faa4e4d0eb73ab68f1a794d2cbf7bab33b1dda2ac2959ffb7c61591899"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52e5834d7d6e58a36846e059d00559cb9ed20410664f3ad156cd2cc239a11230"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2fc947e5350fdce548bfc94f434e8760d5cafa97fb9c495d2fef6757aa02ec0"}, + {file = 
"orjson-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0efabbf839388a1dab5b72b5d3baedbd6039ac83f3b55736eb9934ea5494d258"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a3f29634260708c200c4fe148e42b4aae97d7b9fee417fbdd74f8cfc265f15b0"}, + {file = "orjson-3.10.11-cp313-none-win32.whl", hash = "sha256:1a1222ffcee8a09476bbdd5d4f6f33d06d0d6642df2a3d78b7a195ca880d669b"}, + {file = "orjson-3.10.11-cp313-none-win_amd64.whl", hash = "sha256:bc274ac261cc69260913b2d1610760e55d3c0801bb3457ba7b9004420b6b4270"}, + {file = "orjson-3.10.11-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:19b3763e8bbf8ad797df6b6b5e0fc7c843ec2e2fc0621398534e0c6400098f87"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1be83a13312e5e58d633580c5eb8d0495ae61f180da2722f20562974188af205"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:afacfd1ab81f46dedd7f6001b6d4e8de23396e4884cd3c3436bd05defb1a6446"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb4d0bea56bba596723d73f074c420aec3b2e5d7d30698bc56e6048066bd560c"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96ed1de70fcb15d5fed529a656df29f768187628727ee2788344e8a51e1c1350"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bfb30c891b530f3f80e801e3ad82ef150b964e5c38e1fb8482441c69c35c61c"}, + {file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d496c74fc2b61341e3cefda7eec21b7854c5f672ee350bc55d9a4997a8a95204"}, + {file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:655a493bac606655db9a47fe94d3d84fc7f3ad766d894197c94ccf0c5408e7d3"}, + {file = "orjson-3.10.11-cp38-none-win32.whl", hash = 
"sha256:b9546b278c9fb5d45380f4809e11b4dd9844ca7aaf1134024503e134ed226161"}, + {file = "orjson-3.10.11-cp38-none-win_amd64.whl", hash = "sha256:b592597fe551d518f42c5a2eb07422eb475aa8cfdc8c51e6da7054b836b26782"}, + {file = "orjson-3.10.11-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95f2ecafe709b4e5c733b5e2768ac569bed308623c85806c395d9cca00e08af"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80c00d4acded0c51c98754fe8218cb49cb854f0f7eb39ea4641b7f71732d2cb7"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:461311b693d3d0a060439aa669c74f3603264d4e7a08faa68c47ae5a863f352d"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52ca832f17d86a78cbab86cdc25f8c13756ebe182b6fc1a97d534051c18a08de"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c57ea78a753812f528178aa2f1c57da633754c91d2124cb28991dab4c79a54"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7fcfc6f7ca046383fb954ba528587e0f9336828b568282b27579c49f8e16aad"}, + {file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:86b9dd983857970c29e4c71bb3e95ff085c07d3e83e7c46ebe959bac07ebd80b"}, + {file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d83f87582d223e54efb2242a79547611ba4ebae3af8bae1e80fa9a0af83bb7f"}, + {file = "orjson-3.10.11-cp39-none-win32.whl", hash = "sha256:9fd0ad1c129bc9beb1154c2655f177620b5beaf9a11e0d10bac63ef3fce96950"}, + {file = "orjson-3.10.11-cp39-none-win_amd64.whl", hash = "sha256:10f416b2a017c8bd17f325fb9dee1fb5cdd7a54e814284896b7c3f2763faa017"}, + {file = "orjson-3.10.11.tar.gz", hash = "sha256:e35b6d730de6384d5b2dab5fd23f0d76fae8bbc8c353c2f78210aa5fa4beb3ef"}, +] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core 
utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pydantic" +version = "2.10.0" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.10.0-py3-none-any.whl", hash = "sha256:5e7807ba9201bdf61b1b58aa6eb690916c40a47acfb114b1b4fef3e7fd5b30fc"}, + {file = "pydantic-2.10.0.tar.gz", hash = "sha256:0aca0f045ff6e2f097f1fe89521115335f15049eeb8a7bef3dafe4b19a74e289"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.27.0" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.27.0" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.27.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2ac6b919f7fed71b17fe0b4603c092a4c9b5bae414817c9c81d3c22d1e1bcc"}, + {file = "pydantic_core-2.27.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e015833384ca3e1a0565a79f5d953b0629d9138021c27ad37c92a9fa1af7623c"}, + {file = 
"pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db72e40628967f6dc572020d04b5f800d71264e0531c6da35097e73bdf38b003"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df45c4073bed486ea2f18757057953afed8dd77add7276ff01bccb79982cf46c"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:836a4bfe0cc6d36dc9a9cc1a7b391265bf6ce9d1eb1eac62ac5139f5d8d9a6fa"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bf1340ae507f6da6360b24179c2083857c8ca7644aab65807023cf35404ea8d"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ab325fc86fbc077284c8d7f996d904d30e97904a87d6fb303dce6b3de7ebba9"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1da0c98a85a6c6ed702d5556db3b09c91f9b0b78de37b7593e2de8d03238807a"}, + {file = "pydantic_core-2.27.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7b0202ebf2268954090209a84f9897345719e46a57c5f2c9b7b250ca0a9d3e63"}, + {file = "pydantic_core-2.27.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:35380671c3c921fe8adf31ad349dc6f7588b7e928dbe44e1093789734f607399"}, + {file = "pydantic_core-2.27.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b4c19525c3538fbc0bbda6229f9682fb8199ce9ac37395880e6952798e00373"}, + {file = "pydantic_core-2.27.0-cp310-none-win32.whl", hash = "sha256:333c840a1303d1474f491e7be0b718226c730a39ead0f7dab2c7e6a2f3855555"}, + {file = "pydantic_core-2.27.0-cp310-none-win_amd64.whl", hash = "sha256:99b2863c1365f43f74199c980a3d40f18a218fbe683dd64e470199db426c4d6a"}, + {file = "pydantic_core-2.27.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4523c4009c3f39d948e01962223c9f5538602e7087a628479b723c939fab262d"}, + {file = 
"pydantic_core-2.27.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84af1cf7bfdcbc6fcf5a5f70cc9896205e0350306e4dd73d54b6a18894f79386"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e65466b31be1070b4a5b7dbfbd14b247884cb8e8b79c64fb0f36b472912dbaea"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a5c022bb0d453192426221605efc865373dde43b17822a264671c53b068ac20c"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bb69bf3b6500f195c3deb69c1205ba8fc3cb21d1915f1f158a10d6b1ef29b6a"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0aa4d1b2eba9a325897308b3124014a142cdccb9f3e016f31d3ebee6b5ea5e75"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e96ca781e0c01e32115912ebdf7b3fb0780ce748b80d7d28a0802fa9fbaf44e"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b872c86d8d71827235c7077461c502feb2db3f87d9d6d5a9daa64287d75e4fa0"}, + {file = "pydantic_core-2.27.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:82e1ad4ca170e8af4c928b67cff731b6296e6a0a0981b97b2eb7c275cc4e15bd"}, + {file = "pydantic_core-2.27.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:eb40f828bc2f73f777d1eb8fee2e86cd9692a4518b63b6b5aa8af915dfd3207b"}, + {file = "pydantic_core-2.27.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9a8fbf506fde1529a1e3698198fe64bfbe2e0c09557bc6a7dcf872e7c01fec40"}, + {file = "pydantic_core-2.27.0-cp311-none-win32.whl", hash = "sha256:24f984fc7762ed5f806d9e8c4c77ea69fdb2afd987b4fd319ef06c87595a8c55"}, + {file = "pydantic_core-2.27.0-cp311-none-win_amd64.whl", hash = "sha256:68950bc08f9735306322bfc16a18391fcaac99ded2509e1cc41d03ccb6013cfe"}, + {file = "pydantic_core-2.27.0-cp311-none-win_arm64.whl", hash = 
"sha256:3eb8849445c26b41c5a474061032c53e14fe92a11a5db969f722a2716cd12206"}, + {file = "pydantic_core-2.27.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8117839a9bdbba86e7f9df57018fe3b96cec934c3940b591b0fd3fbfb485864a"}, + {file = "pydantic_core-2.27.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a291d0b4243a259c8ea7e2b84eb9ccb76370e569298875a7c5e3e71baf49057a"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e35afd9e10b2698e6f2f32256678cb23ca6c1568d02628033a837638b3ed12"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:58ab0d979c969983cdb97374698d847a4acffb217d543e172838864636ef10d9"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d06b667e53320332be2bf6f9461f4a9b78092a079b8ce8634c9afaa7e10cd9f"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78f841523729e43e3928a364ec46e2e3f80e6625a4f62aca5c345f3f626c6e8a"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:400bf470e4327e920883b51e255617dfe4496d4e80c3fea0b5a5d0bf2c404dd4"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:951e71da6c89d354572098bada5ba5b5dc3a9390c933af8a614e37755d3d1840"}, + {file = "pydantic_core-2.27.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a51ce96224eadd1845150b204389623c8e129fde5a67a84b972bd83a85c6c40"}, + {file = "pydantic_core-2.27.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:483c2213a609e7db2c592bbc015da58b6c75af7360ca3c981f178110d9787bcf"}, + {file = "pydantic_core-2.27.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:359e7951f04ad35111b5ddce184db3391442345d0ab073aa63a95eb8af25a5ef"}, + {file = "pydantic_core-2.27.0-cp312-none-win32.whl", hash = 
"sha256:ee7d9d5537daf6d5c74a83b38a638cc001b648096c1cae8ef695b0c919d9d379"}, + {file = "pydantic_core-2.27.0-cp312-none-win_amd64.whl", hash = "sha256:2be0ad541bb9f059954ccf8877a49ed73877f862529575ff3d54bf4223e4dd61"}, + {file = "pydantic_core-2.27.0-cp312-none-win_arm64.whl", hash = "sha256:6e19401742ed7b69e51d8e4df3c03ad5ec65a83b36244479fd70edde2828a5d9"}, + {file = "pydantic_core-2.27.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5f2b19b8d6fca432cb3acf48cf5243a7bf512988029b6e6fd27e9e8c0a204d85"}, + {file = "pydantic_core-2.27.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c86679f443e7085ea55a7376462553996c688395d18ef3f0d3dbad7838f857a2"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:510b11e9c3b1a852876d1ccd8d5903684336d635214148637ceb27366c75a467"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb704155e73b833801c247f39d562229c0303f54770ca14fb1c053acb376cf10"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ce048deb1e033e7a865ca384770bccc11d44179cf09e5193a535c4c2f497bdc"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58560828ee0951bb125c6f2862fbc37f039996d19ceb6d8ff1905abf7da0bf3d"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb4785894936d7682635726613c44578c420a096729f1978cd061a7e72d5275"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2883b260f7a93235488699d39cbbd94fa7b175d3a8063fbfddd3e81ad9988cb2"}, + {file = "pydantic_core-2.27.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c6fcb3fa3855d583aa57b94cf146f7781d5d5bc06cb95cb3afece33d31aac39b"}, + {file = "pydantic_core-2.27.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = 
"sha256:e851a051f7260e6d688267eb039c81f05f23a19431bd7dfa4bf5e3cb34c108cd"}, + {file = "pydantic_core-2.27.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edb1bfd45227dec8d50bc7c7d86463cd8728bcc574f9b07de7369880de4626a3"}, + {file = "pydantic_core-2.27.0-cp313-none-win32.whl", hash = "sha256:678f66462058dd978702db17eb6a3633d634f7aa0deaea61e0a674152766d3fc"}, + {file = "pydantic_core-2.27.0-cp313-none-win_amd64.whl", hash = "sha256:d28ca7066d6cdd347a50d8b725dc10d9a1d6a1cce09836cf071ea6a2d4908be0"}, + {file = "pydantic_core-2.27.0-cp313-none-win_arm64.whl", hash = "sha256:6f4a53af9e81d757756508b57cae1cf28293f0f31b9fa2bfcb416cc7fb230f9d"}, + {file = "pydantic_core-2.27.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:e9f9feee7f334b72ceae46313333d002b56f325b5f04271b4ae2aadd9e993ae4"}, + {file = "pydantic_core-2.27.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:225bfff5d425c34e1fd562cef52d673579d59b967d9de06178850c4802af9039"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921ad596ff1a82f9c692b0758c944355abc9f0de97a4c13ca60ffc6d8dc15d4"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6354e18a9be37bfa124d6b288a87fb30c673745806c92956f1a25e3ae6e76b96"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ee4c2a75af9fe21269a4a0898c5425afb01af1f5d276063f57e2ae1bc64e191"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c91e3c04f5191fd3fb68764bddeaf02025492d5d9f23343b283870f6ace69708"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a6ebfac28fd51890a61df36ef202adbd77d00ee5aca4a3dadb3d9ed49cfb929"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36aa167f69d8807ba7e341d67ea93e50fcaaf6bc433bb04939430fa3dab06f31"}, + {file = 
"pydantic_core-2.27.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e8d89c276234579cd3d095d5fa2a44eb10db9a218664a17b56363cddf226ff3"}, + {file = "pydantic_core-2.27.0-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:5cc822ab90a70ea3a91e6aed3afac570b276b1278c6909b1d384f745bd09c714"}, + {file = "pydantic_core-2.27.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e15315691fe2253eb447503153acef4d7223dfe7e7702f9ed66539fcd0c43801"}, + {file = "pydantic_core-2.27.0-cp38-none-win32.whl", hash = "sha256:dfa5f5c0a4c8fced1422dc2ca7eefd872d5d13eb33cf324361dbf1dbfba0a9fe"}, + {file = "pydantic_core-2.27.0-cp38-none-win_amd64.whl", hash = "sha256:513cb14c0cc31a4dfd849a4674b20c46d87b364f997bbcb02282306f5e187abf"}, + {file = "pydantic_core-2.27.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:4148dc9184ab79e356dc00a4199dc0ee8647973332cb385fc29a7cced49b9f9c"}, + {file = "pydantic_core-2.27.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5fc72fbfebbf42c0856a824b8b0dc2b5cd2e4a896050281a21cfa6fed8879cb1"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:185ef205256cd8b38431205698531026979db89a79587725c1e55c59101d64e9"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:395e3e1148fa7809016231f8065f30bb0dc285a97b4dc4360cd86e17bab58af7"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33d14369739c5d07e2e7102cdb0081a1fa46ed03215e07f097b34e020b83b1ae"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7820bb0d65e3ce1e3e70b6708c2f66143f55912fa02f4b618d0f08b61575f12"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43b61989068de9ce62296cde02beffabcadb65672207fc51e7af76dca75e6636"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:15e350efb67b855cd014c218716feea4986a149ed1f42a539edd271ee074a196"}, + {file = "pydantic_core-2.27.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:433689845288f9a1ee5714444e65957be26d30915f7745091ede4a83cfb2d7bb"}, + {file = "pydantic_core-2.27.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:3fd8bc2690e7c39eecdf9071b6a889ce7b22b72073863940edc2a0a23750ca90"}, + {file = "pydantic_core-2.27.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:884f1806609c2c66564082540cffc96868c5571c7c3cf3a783f63f2fb49bd3cd"}, + {file = "pydantic_core-2.27.0-cp39-none-win32.whl", hash = "sha256:bf37b72834e7239cf84d4a0b2c050e7f9e48bced97bad9bdf98d26b8eb72e846"}, + {file = "pydantic_core-2.27.0-cp39-none-win_amd64.whl", hash = "sha256:31a2cae5f059329f9cfe3d8d266d3da1543b60b60130d186d9b6a3c20a346361"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4fb49cfdb53af5041aba909be00cccfb2c0d0a2e09281bf542371c5fd36ad04c"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:49633583eb7dc5cba61aaf7cdb2e9e662323ad394e543ee77af265736bcd3eaa"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:153017e3d6cd3ce979de06d84343ca424bb6092727375eba1968c8b4693c6ecb"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff63a92f6e249514ef35bc795de10745be0226eaea06eb48b4bbeaa0c8850a4a"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5982048129f40b082c2654de10c0f37c67a14f5ff9d37cf35be028ae982f26df"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:91bc66f878557313c2a6bcf396e7befcffe5ab4354cfe4427318968af31143c3"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:68ef5377eb582fa4343c9d0b57a5b094046d447b4c73dd9fbd9ffb216f829e7d"}, + 
{file = "pydantic_core-2.27.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c5726eec789ee38f2c53b10b1821457b82274f81f4f746bb1e666d8741fcfadb"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c0c431e4be5c1a0c6654e0c31c661cd89e0ca956ef65305c3c3fd96f4e72ca39"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8e21d927469d04b39386255bf00d0feedead16f6253dcc85e9e10ddebc334084"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b51f964fcbb02949fc546022e56cdb16cda457af485e9a3e8b78ac2ecf5d77e"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a7fd4de38f7ff99a37e18fa0098c3140286451bc823d1746ba80cec5b433a1"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fda87808429c520a002a85d6e7cdadbf58231d60e96260976c5b8f9a12a8e13"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a150392102c402c538190730fda06f3bce654fc498865579a9f2c1d2b425833"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c9ed88b398ba7e3bad7bd64d66cc01dcde9cfcb7ec629a6fd78a82fa0b559d78"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:9fe94d9d2a2b4edd7a4b22adcd45814b1b59b03feb00e56deb2e89747aec7bfe"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d8b5ee4ae9170e2775d495b81f414cc20268041c42571530513496ba61e94ba3"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d29e235ce13c91902ef3efc3d883a677655b3908b1cbc73dee816e5e1f8f7739"}, + {file = "pydantic_core-2.27.0.tar.gz", hash = "sha256:f57783fbaf648205ac50ae7d646f27582fc706be3977e87c3c124e7a92407b10"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + 
+[[package]] +name = "pymongo" +version = "4.9.2" +description = "Python driver for MongoDB " +optional = false +python-versions = ">=3.8" +files = [ + {file = "pymongo-4.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab8d54529feb6e29035ba8f0570c99ad36424bc26486c238ad7ce28597bc43c8"}, + {file = "pymongo-4.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f928bdc152a995cbd0b563fab201b2df873846d11f7a41d1f8cc8a01b35591ab"}, + {file = "pymongo-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6e7251d59fa3dcbb1399a71a3aec63768cebc6b22180b671601c2195fe1f90a"}, + {file = "pymongo-4.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e759ed0459e7264a11b6896016f616341a8e4c6ab7f71ae651bd21ffc7e9524"}, + {file = "pymongo-4.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f3fc60f242191840ccf02b898bc615b5141fbb70064f38f7e60fcaa35d3b5efd"}, + {file = "pymongo-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c798351666ac97a0ddaa823689061c3af949c2d6acf7fb2d9ab0a7f465ced79"}, + {file = "pymongo-4.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aac78b5fdd49ed8cae49adf76befacb02293a23b412676775c4715148e166d85"}, + {file = "pymongo-4.9.2-cp310-cp310-win32.whl", hash = "sha256:bf77bf175c315e299a91332c2bbebc097c4d4fcc8713e513a9861684aa39023a"}, + {file = "pymongo-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:c42b5aad8971256365bfd0a545fb1c7a199c93db80decd298ea2f987419e2a6d"}, + {file = "pymongo-4.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:99e40f44877b32bf4b3c46ceed2228f08c222cf7dec8a4366dd192a1429143fa"}, + {file = "pymongo-4.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f6834d575ed87edc7dfcab4501d961b6a423b3839edd29ecb1382eee7736777"}, + {file = "pymongo-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3010018f5672e5b7e8d096dea9f1ea6545b05345ff0eb1754f6ee63785550773"}, + {file = "pymongo-4.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69394ee9f0ce38ff71266bad01b7e045cd75e58500ebad5d72187cbabf2e652a"}, + {file = "pymongo-4.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87b18094100f21615d9db99c255dcd9e93e476f10fb03c1d3632cf4b82d201d2"}, + {file = "pymongo-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3039e093d28376d6a54bdaa963ca12230c8a53d7b19c8e6368e19bcfbd004176"}, + {file = "pymongo-4.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ab42d9ee93fe6b90020c42cba5bfb43a2b4660951225d137835efc21940da48"}, + {file = "pymongo-4.9.2-cp311-cp311-win32.whl", hash = "sha256:a663ca60e187a248d370c58961e40f5463077d2b43831eb92120ea28a79ecf96"}, + {file = "pymongo-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:24e7b6887bbfefd05afed26a99a2c69459e2daa351a43a410de0d6c0ee3cce4e"}, + {file = "pymongo-4.9.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8083bbe8cb10bb33dca4d93f8223dd8d848215250bb73867374650bac5fe69e1"}, + {file = "pymongo-4.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a1b8c636bf557c7166e3799bbf1120806ca39e3f06615b141c88d9c9ceae4d8c"}, + {file = "pymongo-4.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8aac5dce28454f47576063fbad31ea9789bba67cab86c95788f97aafd810e65b"}, + {file = "pymongo-4.9.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1d5e7123af1fddf15b2b53e58f20bf5242884e671bcc3860f5e954fe13aeddd"}, + {file = "pymongo-4.9.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe97c847b56d61e533a7af0334193d6b28375b9189effce93129c7e4733794a9"}, + {file = "pymongo-4.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:96ad54433a996e2d1985a9cd8fc82538ca8747c95caae2daf453600cc8c317f9"}, + {file = "pymongo-4.9.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:98b9cade40f5b13e04492a42ae215c3721099be1014ddfe0fbd23f27e4f62c0c"}, + {file = "pymongo-4.9.2-cp312-cp312-win32.whl", hash = "sha256:dde6068ae7c62ea8ee2c5701f78c6a75618cada7e11f03893687df87709558de"}, + {file = "pymongo-4.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:e1ab6cd7cd2d38ffc7ccdc79fdc166c7a91a63f844a96e3e6b2079c054391c68"}, + {file = "pymongo-4.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1ad79d6a74f439a068caf9a1e2daeabc20bf895263435484bbd49e90fbea7809"}, + {file = "pymongo-4.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:877699e21703717507cbbea23e75b419f81a513b50b65531e1698df08b2d7094"}, + {file = "pymongo-4.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc9322ce7cf116458a637ac10517b0c5926a8211202be6dbdc51dab4d4a9afc8"}, + {file = "pymongo-4.9.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cca029f46acf475504eedb33c7839f030c4bc4f946dcba12d9a954cc48850b79"}, + {file = "pymongo-4.9.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c8c861e77527eec5a4b7363c16030dd0374670b620b08a5300f97594bbf5a40"}, + {file = "pymongo-4.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fc70326ae71b3c7b8d6af82f46bb71dafdba3c8f335b29382ae9cf263ef3a5c"}, + {file = "pymongo-4.9.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba9d2f6df977fee24437f82f7412460b0628cd6b961c4235c9cff71577a5b61f"}, + {file = "pymongo-4.9.2-cp313-cp313-win32.whl", hash = "sha256:b3254769e708bc4aa634745c262081d13c841a80038eff3afd15631540a1d227"}, + {file = "pymongo-4.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:169b85728cc17800344ba17d736375f400ef47c9fbb4c42910c4b3e7c0247382"}, + {file = 
"pymongo-4.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3f28afd783be3cebef1235a45340589169d7774cd9909ba0249e2f851ff511d"}, + {file = "pymongo-4.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a0b2e7fedc5911cd44590b5fd8e3714029f378f37f3c0c2043f67150b588d4a"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5af264b9a973859123e3129d131d7246f57659304400e3e6b35ed6eaf099854d"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65c6b2e2a6db38f49433021dda0802ad081118224b2264500ef03a2d82ae26a7"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:410ea165f2f819118eed764c5faa35fa71aeff5ce8b5046af99ed158a5661e9e"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c3c71337d4c923f719cb56253af9244e90353a2454088ee4f184bfb0dd446a4"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:77528a2b928fe3f1f655cefa195e6718ab1ccd1a456aba486d76318e526a7fac"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fdbd558d90b55d7c39c096a79f8a725f1f02b658211924ab98dbc03ecad01095"}, + {file = "pymongo-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e3ff4201ea707f57bf381f61df0e9cd6e896627a59f98a5d1c4a1bd14a2544cb"}, + {file = "pymongo-4.9.2-cp38-cp38-win32.whl", hash = "sha256:ae227bba43e2e6fc8c3440a70b3b8f9ab2b0eb0906d0d2cf814dd9490c572e2a"}, + {file = "pymongo-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:a92c96886048d3ebae62dbcfc775c7f2b965270160e3cb6aab4e06750e030b05"}, + {file = "pymongo-4.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e54e2c6f1dec45c57a587b4c13c16666d5f7c031a642ae177140d1e0551a947e"}, + {file = "pymongo-4.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a49d9292f22a0395c0fd2822a06e385910f1f902c3a9feafc1d0bfc27cd2df6b"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80a1ee9b72eebd96619ebe0beb718a5bcf2a70f464edf315f97b9315ed6854a9"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea9c47f86a322280381e9ddba7491e664ea80bf75df247ea2346faf7626e4e4c"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf963104dfd7235bebc44cef40b4b12c6638bb03b3a828cb495498e286b6edd0"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13330bdf4a57ef70bdd6282721547ec464f773203be47bac1efc4abd74a9190"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7fb10d7069f1e7d7d6a458b1c5e9d1454be6eca2d9885bec25c1202e22c88d2a"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cd832de5df92caa68ee66c872708951d7e0c1f7b289b74189f2ccf1832c56dda"}, + {file = "pymongo-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3f55efe0f77198c055800e605268bfd77a3f0223d1a80b55b771d0c350bc3ade"}, + {file = "pymongo-4.9.2-cp39-cp39-win32.whl", hash = "sha256:f2f43e5d6e739aa78c7053bdf351453c0e53d7667a3cac73255c2169631e052a"}, + {file = "pymongo-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:31c35d3dac5a1b0f65b3da2a19dc7fb88271c86329c75cfea775d5381ade6c06"}, + {file = "pymongo-4.9.2.tar.gz", hash = "sha256:3e63535946f5df7848307b9031aa921f82bb0cbe45f9b0c3296f2173f9283eb0"}, +] + +[package.dependencies] +dnspython = ">=1.16.0,<3.0.0" + +[package.extras] +aws = ["pymongo-auth-aws (>=1.1.0,<2.0.0)"] +docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-autobuild (>=2020.9.1)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"] +encryption = ["certifi", 
"pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.10.0,<2.0.0)"] +gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] +ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] +snappy = ["python-snappy"] +test = ["pytest (>=8.2)", "pytest-asyncio (>=0.24.0)"] +zstd = ["zstandard"] + +[[package]] +name = "pytest" +version = "7.4.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.21.2" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, + {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] + +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock 
package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-repeat" +version = "0.9.3" +description = "pytest plugin for repeating tests" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest_repeat-0.9.3-py3-none-any.whl", hash = "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed"}, + {file = "pytest_repeat-0.9.3.tar.gz", hash = "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185"}, +] + +[package.dependencies] +pytest = "*" + +[[package]] +name = "pytest-watch" +version = "4.2.0" +description = "Local continuous test runner with pytest and watchdog." 
+optional = false +python-versions = "*" +files = [ + {file = "pytest-watch-4.2.0.tar.gz", hash = "sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9"}, +] + +[package.dependencies] +colorama = ">=0.3.3" +docopt = ">=0.4.0" +pytest = ">=2.6.4" +watchdog = ">=0.6.0" + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", 
hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = 
"PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +description = "A utility belt for advanced users of python-requests" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, + {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, +] + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[[package]] +name = "ruff" +version = "0.6.9" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, + {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, + {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, + {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, + {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, + {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, + {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "syrupy" +version = "4.7.2" +description = "Pytest Snapshot Test Utility" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "syrupy-4.7.2-py3-none-any.whl", hash = "sha256:eae7ba6be5aed190237caa93be288e97ca1eec5ca58760e4818972a10c4acc64"}, + {file = "syrupy-4.7.2.tar.gz", hash = "sha256:ea45e099f242de1bb53018c238f408a5bb6c82007bc687aefcbeaa0e1c2e935a"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9.0.0" + +[[package]] +name = "tenacity" +version = "9.0.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = 
"sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tomli" +version = "2.1.0" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +files = [ + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = 
"watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = 
"sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.9.0,<4.0" +content-hash = "c910f1b1c4b864164623039f8a378da2cd11afe41edc7bffe57f29b67ceb89da" diff --git a/libs/langgraph-checkpoint-mongodb/pyproject.toml b/libs/langgraph-checkpoint-mongodb/pyproject.toml new file mode 100644 index 0000000..3e7f922 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/pyproject.toml @@ -0,0 +1,69 @@ +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "langgraph-checkpoint-mongodb" +version = "0.1.0" +description = "Library with a MongoDB implementation of LangGraph checkpoint saver." 
+authors = [] +license = "MIT" +readme = "README.md" +repository = "https://www.github.com/langchain-ai/langchain-mongodb" +packages = [{ include = "langgraph" }] + +[tool.poetry.dependencies] +python = "^3.9.0,<4.0" +langgraph = "^0.2.38" +langgraph-checkpoint = "^2.0.0" +pymongo = "~4.9.0" +motor = ">3.5.0" + +[tool.poetry.group.dev] +optional = true + +[tool.poetry.group.dev.dependencies] +langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"} +langgraph = {git = "https://github.com/langchain-ai/langgraph.git", subdirectory = "libs/langgraph"} +langgraph-checkpoint = {git = "https://github.com/langchain-ai/langgraph.git", subdirectory = "libs/checkpoint"} +# test +pytest = "^7.2.1" +anyio = "^4.4.0" +pytest-asyncio = "^0.21.1" +pytest-mock = "^3.11.1" +pytest-watch = "^4.2.0" +pytest-repeat = "^0.9.3" +syrupy = "^4.0.2" +# lint +ruff = "^0.6.2" +mypy = "^1.10.0" +codespell = "^2.2.0" + +[tool.pytest.ini_options] +addopts = "--strict-markers --strict-config --durations=5 -vv" +markers = [ + "requires: mark tests as requiring a specific library", + "compile: mark placeholder test used to compile integration tests without running them", +] +asyncio_mode = "auto" + +[tool.ruff] +lint.select = [ + "E", # pycodestyle + "F", # Pyflakes + "UP", # pyupgrade + "B", # flake8-bugbear + "I", # isort +] +lint.ignore = ["E501", "B008", "UP007", "UP006"] + +[tool.mypy] +# https://mypy.readthedocs.io/en/stable/config_file.html +disallow_untyped_defs = "True" +explicit_package_bases = "True" +warn_no_return = "False" +warn_unused_ignores = "True" +warn_redundant_casts = "True" +allow_redefinition = "True" +disable_error_code = "typeddict-item, return-value" + diff --git a/libs/langgraph-checkpoint-mongodb/tests/__snapshots__/test_pregel.ambr b/libs/langgraph-checkpoint-mongodb/tests/__snapshots__/test_pregel.ambr new file mode 100644 index 0000000..b2cf266 --- /dev/null +++ 
b/libs/langgraph-checkpoint-mongodb/tests/__snapshots__/test_pregel.ambr @@ -0,0 +1,2456 @@ +# serializer version: 1 +# name: test_branch_then[memory] + ''' + graph TD; + __start__ --> prepare; + finish --> __end__; + prepare -.-> tool_two_slow; + tool_two_slow --> finish; + prepare -.-> tool_two_fast; + tool_two_fast --> finish; + + ''' +# --- +# name: test_branch_then[memory].1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + prepare(prepare) + tool_two_slow(tool_two_slow) + tool_two_fast(tool_two_fast) + finish(finish) + __end__([

__end__

]):::last + __start__ --> prepare; + finish --> __end__; + prepare -.-> tool_two_slow; + tool_two_slow --> finish; + prepare -.-> tool_two_fast; + tool_two_fast --> finish; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_branch_then[mongodb] + ''' + graph TD; + __start__ --> prepare; + finish --> __end__; + prepare -.-> tool_two_slow; + tool_two_slow --> finish; + prepare -.-> tool_two_fast; + tool_two_fast --> finish; + + ''' +# --- +# name: test_branch_then[mongodb].1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + prepare(prepare) + tool_two_slow(tool_two_slow) + tool_two_fast(tool_two_fast) + finish(finish) + __end__([

__end__

]):::last + __start__ --> prepare; + finish --> __end__; + prepare -.-> tool_two_slow; + tool_two_slow --> finish; + prepare -.-> tool_two_fast; + tool_two_fast --> finish; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_entrypoint_graph + '{"title": "LangGraphInput"}' +# --- +# name: test_conditional_entrypoint_graph.1 + '{"title": "LangGraphOutput"}' +# --- +# name: test_conditional_entrypoint_graph.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "left", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "left" + } + }, + { + "id": "right", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "right" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "right", + "target": "__end__" + }, + { + "source": "__start__", + "target": "left", + "data": "go-left", + "conditional": true + }, + { + "source": "__start__", + "target": "right", + "data": "go-right", + "conditional": true + }, + { + "source": "left", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_entrypoint_graph.3 + ''' + graph TD; + right --> __end__; + __start__ -.  go-left  .-> left; + __start__ -.  
go-right  .-> right; + left -.-> __end__; + + ''' +# --- +# name: test_conditional_entrypoint_graph_state + '{"properties": {"input": {"default": null, "title": "Input", "type": "string"}, "output": {"default": null, "title": "Output", "type": "string"}, "steps": {"default": null, "items": {"type": "string"}, "title": "Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_graph_state.1 + '{"properties": {"input": {"default": null, "title": "Input", "type": "string"}, "output": {"default": null, "title": "Output", "type": "string"}, "steps": {"default": null, "items": {"type": "string"}, "title": "Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_graph_state.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "left", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "left" + } + }, + { + "id": "right", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "right" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "right", + "target": "__end__" + }, + { + "source": "__start__", + "target": "left", + "data": "go-left", + "conditional": true + }, + { + "source": "__start__", + "target": "right", + "data": "go-right", + "conditional": true + }, + { + "source": "left", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_entrypoint_graph_state.3 + ''' + graph TD; + right --> __end__; + __start__ -.  go-left  .-> left; + __start__ -.  
go-right  .-> right; + left -.-> __end__; + + ''' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph + '{"properties": {"locations": {"items": {"type": "string"}, "title": "Locations", "type": "array"}, "results": {"items": {"type": "string"}, "title": "Results", "type": "array"}}, "required": ["locations", "results"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph.1 + '{"properties": {"locations": {"items": {"type": "string"}, "title": "Locations", "type": "array"}, "results": {"items": {"type": "string"}, "title": "Results", "type": "array"}}, "required": ["locations", "results"], "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "get_weather", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "get_weather" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "get_weather", + "target": "__end__" + }, + { + "source": "__start__", + "target": "get_weather", + "conditional": true + }, + { + "source": "__start__", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph.3 + ''' + graph TD; + get_weather --> __end__; + __start__ -.-> get_weather; + __start__ -.-> __end__; + + ''' +# --- +# name: test_conditional_graph[memory] + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableAssign" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": 
"tools" + }, + "metadata": { + "parents": {}, + "version": 2, + "variant": "b" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_graph[memory].1 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_graph[memory].2 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent) + tools(tools
parents = {} + version = 2 + variant = b) + __end__([

__end__

]):::last + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_graph[memory].3 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": 1, + "type": "schema", + "data": "ParallelInput" + }, + { + "id": 2, + "type": "schema", + "data": "ParallelOutput" + }, + { + "id": 3, + "type": "runnable", + "data": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate" + ], + "name": "PromptTemplate" + } + }, + { + "id": 4, + "type": "runnable", + "data": { + "id": [ + "langchain_core", + "language_models", + "fake", + "FakeStreamingListLLM" + ], + "name": "FakeStreamingListLLM" + } + }, + { + "id": 5, + "type": "runnable", + "data": { + "id": [ + "langchain_core", + "runnables", + "base", + "RunnableLambda" + ], + "name": "agent_parser" + } + }, + { + "id": 6, + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnablePassthrough" + ], + "name": "Passthrough" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + }, + "metadata": { + "parents": {}, + "version": 2, + "variant": "b" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": 3, + "target": 4 + }, + { + "source": 4, + "target": 5 + }, + { + "source": 1, + "target": 3 + }, + { + "source": 5, + "target": 2 + }, + { + "source": 1, + "target": 6 + }, + { + "source": 6, + "target": 2 + }, + { + "source": "__start__", + "target": 1 + }, + { + "source": "tools", + "target": 1 + }, + { + "source": 2, + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": 2, + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' 
+# --- +# name: test_conditional_graph[memory].4 + ''' + graph TD; + PromptTemplate --> FakeStreamingListLLM; + FakeStreamingListLLM --> agent_parser; + Parallel_agent_outcome_Input --> PromptTemplate; + agent_parser --> Parallel_agent_outcome_Output; + Parallel_agent_outcome_Input --> Passthrough; + Passthrough --> Parallel_agent_outcome_Output; + __start__ --> Parallel_agent_outcome_Input; + tools --> Parallel_agent_outcome_Input; + Parallel_agent_outcome_Output -.  continue  .-> tools; + Parallel_agent_outcome_Output -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_graph[memory].5 + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'agent', + }), + dict({ + 'source': 'tools', + 'target': 'agent', + }), + dict({ + 'conditional': True, + 'data': 'continue', + 'source': 'agent', + 'target': 'tools', + }), + dict({ + 'conditional': True, + 'data': 'exit', + 'source': 'agent', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langchain', + 'schema', + 'runnable', + 'RunnableAssign', + ]), + 'name': 'agent', + }), + 'id': 'agent', + 'metadata': dict({ + '__interrupt': 'after', + }), + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tools', + }), + 'id': 'tools', + 'metadata': dict({ + 'parents': dict({ + }), + 'variant': 'b', + 'version': 2, + }), + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_conditional_graph[memory].6 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent
__interrupt = after) + tools(tools
parents = {} + version = 2 + variant = b) + __end__([

__end__

]):::last + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_graph[mongodb] + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableAssign" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + }, + "metadata": { + "parents": {}, + "version": 2, + "variant": "b" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_graph[mongodb].1 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_graph[mongodb].2 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent) + tools(tools
parents = {} + version = 2 + variant = b) + __end__([

__end__

]):::last + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_graph[mongodb].3 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": 1, + "type": "schema", + "data": "ParallelInput" + }, + { + "id": 2, + "type": "schema", + "data": "ParallelOutput" + }, + { + "id": 3, + "type": "runnable", + "data": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate" + ], + "name": "PromptTemplate" + } + }, + { + "id": 4, + "type": "runnable", + "data": { + "id": [ + "langchain_core", + "language_models", + "fake", + "FakeStreamingListLLM" + ], + "name": "FakeStreamingListLLM" + } + }, + { + "id": 5, + "type": "runnable", + "data": { + "id": [ + "langchain_core", + "runnables", + "base", + "RunnableLambda" + ], + "name": "agent_parser" + } + }, + { + "id": 6, + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnablePassthrough" + ], + "name": "Passthrough" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + }, + "metadata": { + "parents": {}, + "version": 2, + "variant": "b" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": 3, + "target": 4 + }, + { + "source": 4, + "target": 5 + }, + { + "source": 1, + "target": 3 + }, + { + "source": 5, + "target": 2 + }, + { + "source": 1, + "target": 6 + }, + { + "source": 6, + "target": 2 + }, + { + "source": "__start__", + "target": 1 + }, + { + "source": "tools", + "target": 1 + }, + { + "source": 2, + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": 2, + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' 
+# --- +# name: test_conditional_graph[mongodb].4 + ''' + graph TD; + PromptTemplate --> FakeStreamingListLLM; + FakeStreamingListLLM --> agent_parser; + Parallel_agent_outcome_Input --> PromptTemplate; + agent_parser --> Parallel_agent_outcome_Output; + Parallel_agent_outcome_Input --> Passthrough; + Passthrough --> Parallel_agent_outcome_Output; + __start__ --> Parallel_agent_outcome_Input; + tools --> Parallel_agent_outcome_Input; + Parallel_agent_outcome_Output -.  continue  .-> tools; + Parallel_agent_outcome_Output -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_graph[mongodb].5 + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'agent', + }), + dict({ + 'source': 'tools', + 'target': 'agent', + }), + dict({ + 'conditional': True, + 'data': 'continue', + 'source': 'agent', + 'target': 'tools', + }), + dict({ + 'conditional': True, + 'data': 'exit', + 'source': 'agent', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langchain', + 'schema', + 'runnable', + 'RunnableAssign', + ]), + 'name': 'agent', + }), + 'id': 'agent', + 'metadata': dict({ + '__interrupt': 'after', + }), + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tools', + }), + 'id': 'tools', + 'metadata': dict({ + 'parents': dict({ + }), + 'variant': 'b', + 'version': 2, + }), + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_conditional_graph[mongodb].6 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent
__interrupt = after) + tools(tools
parents = {} + version = 2 + variant = b) + __end__([

__end__

]):::last + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_state_graph[memory] + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_state_graph[memory].1 + '{"$defs": 
{"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_state_graph[memory].2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableSequence" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + 
"id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_state_graph[memory].3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_state_graph[mongodb] + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": 
"#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_state_graph[mongodb].1 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": 
"Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_state_graph[mongodb].2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableSequence" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_state_graph[mongodb].3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  
exit  .-> __end__; + + ''' +# --- +# name: test_conditional_state_graph_with_list_edge_inputs + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "A", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "A" + } + }, + { + "id": "B", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "B" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "A", + "target": "__end__" + }, + { + "source": "B", + "target": "__end__" + }, + { + "source": "__start__", + "target": "A" + }, + { + "source": "__start__", + "target": "B" + } + ] + } + ''' +# --- +# name: test_conditional_state_graph_with_list_edge_inputs.1 + ''' + graph TD; + A --> __end__; + B --> __end__; + __start__ --> A; + __start__ --> B; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge[memory] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query --> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query --> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].1 + dict({ + 'definitions': dict({ 
+ 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[mongodb].1 + dict({ + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[mongodb].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 
'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 
'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[memory] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_message_graph[memory] + '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": 
"Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, 
{"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result 
of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": 
"Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. 
Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, 
"UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' +# --- +# name: test_message_graph[memory].1 + '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", 
"properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": 
"#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], 
"title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages 
are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], 
"title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. 
Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, 
"UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' +# --- +# name: test_message_graph[memory].2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "tests", + "test_pregel", + "FakeFuntionChatModel" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "prebuilt", + "tool_node", + "ToolNode" + ], + "name": 
"tools" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "end", + "conditional": true + } + ] + } + ''' +# --- +# name: test_message_graph[memory].3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  end  .-> __end__; + + ''' +# --- +# name: test_message_graph[mongodb] + '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": 
null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. 
Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, 
"UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' +# --- +# name: test_message_graph[mongodb].1 + '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", 
"properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": 
"#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], 
"title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages 
are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], 
"title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. 
Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, 
"UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' +# --- +# name: test_message_graph[mongodb].2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "tests", + "test_pregel", + "FakeFuntionChatModel" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "prebuilt", + "tool_node", + "ToolNode" + ], + "name": 
"tools" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "end", + "conditional": true + } + ] + } + ''' +# --- +# name: test_message_graph[mongodb].3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  end  .-> __end__; + + ''' +# --- +# name: test_multiple_sinks_subgraphs + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + uno(uno) + dos(dos) + subgraph_one(one) + subgraph_two(two) + subgraph_three(three) + __start__ --> uno; + uno -.-> dos; + uno -.-> subgraph_one; + subgraph subgraph + subgraph_one -.-> subgraph_two; + subgraph_one -.-> subgraph_three; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_nested_graph + ''' + graph TD; + __start__ --> inner; + inner --> side; + side --> __end__; + + ''' +# --- +# name: test_nested_graph.1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + inner(inner) + side(side) + __end__([

__end__

]):::last + __start__ --> inner; + inner --> side; + side --> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_nested_graph_xray + dict({ + 'edges': list([ + dict({ + 'conditional': True, + 'source': 'tool_two:__start__', + 'target': 'tool_two:tool_two_slow', + }), + dict({ + 'source': 'tool_two:tool_two_slow', + 'target': 'tool_two:__end__', + }), + dict({ + 'conditional': True, + 'source': 'tool_two:__start__', + 'target': 'tool_two:tool_two_fast', + }), + dict({ + 'source': 'tool_two:tool_two_fast', + 'target': 'tool_two:__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_one', + }), + dict({ + 'source': 'tool_one', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_two:__start__', + }), + dict({ + 'source': 'tool_two:__end__', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_three', + }), + dict({ + 'source': 'tool_three', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_one', + }), + 'id': 'tool_one', + 'type': 'runnable', + }), + dict({ + 'data': 'tool_two:__start__', + 'id': 'tool_two:__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_two:tool_two_slow', + }), + 'id': 'tool_two:tool_two_slow', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_two:tool_two_fast', + }), + 'id': 'tool_two:tool_two_fast', + 'type': 'runnable', + }), + dict({ + 'data': 'tool_two:__end__', + 'id': 'tool_two:__end__', + 
'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_three', + }), + 'id': 'tool_three', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_nested_graph_xray.1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + tool_one(tool_one) + tool_two___start__(

__start__

) + tool_two_tool_two_slow(tool_two_slow) + tool_two_tool_two_fast(tool_two_fast) + tool_two___end__(

__end__

) + tool_three(tool_three) + __end__([

__end__

]):::last + __start__ -.-> tool_one; + tool_one --> __end__; + __start__ -.-> tool_two___start__; + tool_two___end__ --> __end__; + __start__ -.-> tool_three; + tool_three --> __end__; + subgraph tool_two + tool_two___start__ -.-> tool_two_tool_two_slow; + tool_two_tool_two_slow --> tool_two___end__; + tool_two___start__ -.-> tool_two_tool_two_fast; + tool_two_tool_two_fast --> tool_two___end__; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_prebuilt_tool_chat + '{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_prebuilt_tool_chat.1 + '{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response 
Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_prebuilt_tool_chat.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "prebuilt", + "tool_node", + "ToolNode" + ], + "name": "tools" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_prebuilt_tool_chat.3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.-> tools; + agent -.-> __end__; + + ''' +# --- +# name: test_repeat_condition + ''' + graph TD; + __start__ --> Researcher; + Researcher -.  continue  .-> Chart_Generator; + Researcher -.  call_tool  .-> Call_Tool; + Researcher -.  end  .-> __end__; + Chart_Generator -.  continue  .-> Researcher; + Chart_Generator -.  call_tool  .-> Call_Tool; + Chart_Generator -.  end  .-> __end__; + Call_Tool -.-> Researcher; + Call_Tool -.-> Chart_Generator; + Researcher -.  
redo  .-> Researcher; + + ''' +# --- +# name: test_simple_multi_edge + ''' + graph TD; + __start__ --> up; + down --> __end__; + side --> down; + up --> down; + up --> other; + up --> side; + + ''' +# --- +# name: test_start_branch_then[memory] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + tool_two_slow(tool_two_slow) + tool_two_fast(tool_two_fast) + __end__([

__end__

]):::last + __start__ -.-> tool_two_slow; + tool_two_slow --> __end__; + __start__ -.-> tool_two_fast; + tool_two_fast --> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_start_branch_then[mongodb] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + tool_two_slow(tool_two_slow) + tool_two_fast(tool_two_fast) + __end__([

__end__

]):::last + __start__ -.-> tool_two_slow; + tool_two_slow --> __end__; + __start__ -.-> tool_two_fast; + tool_two_fast --> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_state_graph_w_config_inherited_state_keys + '{"$defs": {"Configurable": {"properties": {"tools": {"default": null, "items": {"type": "string"}, "title": "Tools", "type": "array"}}, "title": "Configurable", "type": "object"}}, "properties": {"configurable": {"$ref": "#/$defs/Configurable", "default": null}}, "title": "LangGraphConfig", "type": "object"}' +# --- +# name: test_state_graph_w_config_inherited_state_keys.1 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": 
"null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_state_graph_w_config_inherited_state_keys.2 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": 
["input"], "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_weather_subgraph[memory] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + router_node(router_node) + normal_llm_node(normal_llm_node) + weather_graph_model_node(model_node) + weather_graph_weather_node(weather_node
__interrupt = before) + __end__([

__end__

]):::last + __start__ --> router_node; + normal_llm_node --> __end__; + weather_graph_weather_node --> __end__; + router_node -.-> normal_llm_node; + router_node -.-> weather_graph_model_node; + router_node -.-> __end__; + subgraph weather_graph + weather_graph_model_node --> weather_graph_weather_node; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_weather_subgraph[mongodb] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + router_node(router_node) + normal_llm_node(normal_llm_node) + weather_graph_model_node(model_node) + weather_graph_weather_node(weather_node
__interrupt = before) + __end__([

__end__

]):::last + __start__ --> router_node; + normal_llm_node --> __end__; + weather_graph_weather_node --> __end__; + router_node -.-> normal_llm_node; + router_node -.-> weather_graph_model_node; + router_node -.-> __end__; + subgraph weather_graph + weather_graph_model_node --> weather_graph_weather_node; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_xray_bool + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + gp_one(gp_one) + gp_two___start__(

__start__

) + gp_two_p_one(p_one) + gp_two_p_two___start__(

__start__

) + gp_two_p_two_c_one(c_one) + gp_two_p_two_c_two(c_two) + gp_two_p_two___end__(

__end__

) + gp_two___end__(

__end__

) + __end__([

__end__

]):::last + __start__ --> gp_one; + gp_two___end__ --> gp_one; + gp_one -.  0  .-> gp_two___start__; + gp_one -.  1  .-> __end__; + subgraph gp_two + gp_two___start__ --> gp_two_p_one; + gp_two_p_two___end__ --> gp_two_p_one; + gp_two_p_one -.  0  .-> gp_two_p_two___start__; + gp_two_p_one -.  1  .-> gp_two___end__; + subgraph p_two + gp_two_p_two___start__ --> gp_two_p_two_c_one; + gp_two_p_two_c_two --> gp_two_p_two_c_one; + gp_two_p_two_c_one -.  0  .-> gp_two_p_two_c_two; + gp_two_p_two_c_one -.  1  .-> gp_two_p_two___end__; + end + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_xray_issue + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + p_one(p_one) + p_two___start__(

__start__

) + p_two_c_one(c_one) + p_two_c_two(c_two) + p_two___end__(

__end__

) + __end__([

__end__

]):::last + __start__ --> p_one; + p_two___end__ --> p_one; + p_one -.  0  .-> p_two___start__; + p_one -.  1  .-> __end__; + subgraph p_two + p_two___start__ --> p_two_c_one; + p_two_c_two --> p_two_c_one; + p_two_c_one -.  0  .-> p_two_c_two; + p_two_c_one -.  1  .-> p_two___end__; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_xray_lance + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'ask_question', + }), + dict({ + 'source': 'ask_question', + 'target': 'answer_question', + }), + dict({ + 'conditional': True, + 'source': 'answer_question', + 'target': 'ask_question', + }), + dict({ + 'conditional': True, + 'source': 'answer_question', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'ask_question', + }), + 'id': 'ask_question', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'answer_question', + }), + 'id': 'answer_question', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_xray_lance.1 + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'generate_analysts', + }), + dict({ + 'source': 'conduct_interview', + 'target': 'generate_sections', + }), + dict({ + 'source': 'generate_sections', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': 'generate_analysts', + 'target': 'conduct_interview', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 
'generate_analysts', + }), + 'id': 'generate_analysts', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'graph', + 'state', + 'CompiledStateGraph', + ]), + 'name': 'conduct_interview', + }), + 'id': 'conduct_interview', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'generate_sections', + }), + 'id': 'generate_sections', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_xray_lance.2 + dict({ + 'edges': list([ + dict({ + 'source': 'conduct_interview:__start__', + 'target': 'conduct_interview:ask_question', + }), + dict({ + 'source': 'conduct_interview:ask_question', + 'target': 'conduct_interview:answer_question', + }), + dict({ + 'conditional': True, + 'source': 'conduct_interview:answer_question', + 'target': 'conduct_interview:ask_question', + }), + dict({ + 'conditional': True, + 'source': 'conduct_interview:answer_question', + 'target': 'conduct_interview:__end__', + }), + dict({ + 'source': '__start__', + 'target': 'generate_analysts', + }), + dict({ + 'source': 'conduct_interview:__end__', + 'target': 'generate_sections', + }), + dict({ + 'source': 'generate_sections', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': 'generate_analysts', + 'target': 'conduct_interview:__start__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'generate_analysts', + }), + 'id': 'generate_analysts', + 'type': 'runnable', + }), + dict({ + 'data': 'conduct_interview:__start__', + 'id': 'conduct_interview:__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 
'conduct_interview:ask_question', + }), + 'id': 'conduct_interview:ask_question', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'conduct_interview:answer_question', + }), + 'id': 'conduct_interview:answer_question', + 'type': 'runnable', + }), + dict({ + 'data': 'conduct_interview:__end__', + 'id': 'conduct_interview:__end__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'generate_sections', + }), + 'id': 'generate_sections', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- diff --git a/libs/langgraph-checkpoint-mongodb/tests/__snapshots__/test_pregel_async.ambr b/libs/langgraph-checkpoint-mongodb/tests/__snapshots__/test_pregel_async.ambr new file mode 100644 index 0000000..6edff52 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/__snapshots__/test_pregel_async.ambr @@ -0,0 +1,293 @@ +# serializer version: 1 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'answer': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Answer', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 
'title': 'Docs', + 'type': 'array', + }), + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + 'docs', + ]), + 'title': 'State', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].2 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'answer': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Answer', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + 'docs', + ]), + 'title': 'State', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb_aio] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb_aio].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'answer': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Answer', + }), + 'docs': dict({ + 'items': 
dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + 'docs', + ]), + 'title': 'State', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb_aio].2 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'answer': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Answer', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + 'docs', + ]), + 'title': 'State', + 'type': 'object', + }) +# --- +# name: test_weather_subgraph[memory] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + router_node(router_node) + normal_llm_node(normal_llm_node) + weather_graph_model_node(model_node) + weather_graph_weather_node(weather_node
__interrupt = before) + __end__([

__end__

]):::last + __start__ --> router_node; + normal_llm_node --> __end__; + weather_graph_weather_node --> __end__; + router_node -.-> normal_llm_node; + router_node -.-> weather_graph_model_node; + router_node -.-> __end__; + subgraph weather_graph + weather_graph_model_node --> weather_graph_weather_node; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_weather_subgraph[mongodb_aio] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + router_node(router_node) + normal_llm_node(normal_llm_node) + weather_graph_model_node(model_node) + weather_graph_weather_node(weather_node
__interrupt = before) + __end__([

__end__

]):::last + __start__ --> router_node; + normal_llm_node --> __end__; + weather_graph_weather_node --> __end__; + router_node -.-> normal_llm_node; + router_node -.-> weather_graph_model_node; + router_node -.-> __end__; + subgraph weather_graph + weather_graph_model_node --> weather_graph_weather_node; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__init__.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__snapshots__/test_pregel.ambr b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__snapshots__/test_pregel.ambr new file mode 100644 index 0000000..247b3ae --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__snapshots__/test_pregel.ambr @@ -0,0 +1,1632 @@ +# serializer version: 1 +# name: test_branch_then[mongodb] + ''' + graph TD; + __start__ --> prepare; + finish --> __end__; + prepare -.-> tool_two_slow; + tool_two_slow --> finish; + prepare -.-> tool_two_fast; + tool_two_fast --> finish; + + ''' +# --- +# name: test_branch_then[mongodb].1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + prepare(prepare) + tool_two_slow(tool_two_slow) + tool_two_fast(tool_two_fast) + finish(finish) + __end__([

__end__

]):::last + __start__ --> prepare; + finish --> __end__; + prepare -.-> tool_two_slow; + tool_two_slow --> finish; + prepare -.-> tool_two_fast; + tool_two_fast --> finish; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_entrypoint_graph + '{"title": "LangGraphInput"}' +# --- +# name: test_conditional_entrypoint_graph.1 + '{"title": "LangGraphOutput"}' +# --- +# name: test_conditional_entrypoint_graph.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "left", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "left" + } + }, + { + "id": "right", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "right" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "right", + "target": "__end__" + }, + { + "source": "__start__", + "target": "left", + "data": "go-left", + "conditional": true + }, + { + "source": "__start__", + "target": "right", + "data": "go-right", + "conditional": true + }, + { + "source": "left", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_entrypoint_graph.3 + ''' + graph TD; + right --> __end__; + __start__ -.  go-left  .-> left; + __start__ -.  
go-right  .-> right; + left -.-> __end__; + + ''' +# --- +# name: test_conditional_entrypoint_graph_state + '{"properties": {"input": {"default": null, "title": "Input", "type": "string"}, "output": {"default": null, "title": "Output", "type": "string"}, "steps": {"default": null, "items": {"type": "string"}, "title": "Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_graph_state.1 + '{"properties": {"input": {"default": null, "title": "Input", "type": "string"}, "output": {"default": null, "title": "Output", "type": "string"}, "steps": {"default": null, "items": {"type": "string"}, "title": "Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_graph_state.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "left", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "left" + } + }, + { + "id": "right", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "right" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "right", + "target": "__end__" + }, + { + "source": "__start__", + "target": "left", + "data": "go-left", + "conditional": true + }, + { + "source": "__start__", + "target": "right", + "data": "go-right", + "conditional": true + }, + { + "source": "left", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_entrypoint_graph_state.3 + ''' + graph TD; + right --> __end__; + __start__ -.  go-left  .-> left; + __start__ -.  
go-right  .-> right; + left -.-> __end__; + + ''' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph + '{"properties": {"locations": {"items": {"type": "string"}, "title": "Locations", "type": "array"}, "results": {"items": {"type": "string"}, "title": "Results", "type": "array"}}, "required": ["locations", "results"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph.1 + '{"properties": {"locations": {"items": {"type": "string"}, "title": "Locations", "type": "array"}, "results": {"items": {"type": "string"}, "title": "Results", "type": "array"}}, "required": ["locations", "results"], "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "get_weather", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "get_weather" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "get_weather", + "target": "__end__" + }, + { + "source": "__start__", + "target": "get_weather", + "conditional": true + }, + { + "source": "__start__", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_entrypoint_to_multiple_state_graph.3 + ''' + graph TD; + get_weather --> __end__; + __start__ -.-> get_weather; + __start__ -.-> __end__; + + ''' +# --- +# name: test_conditional_graph[mongodb] + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableAssign" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": 
"tools" + }, + "metadata": { + "parents": {}, + "version": 2, + "variant": "b" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_graph[mongodb].1 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_graph[mongodb].2 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent) + tools(tools
parents = {} + version = 2 + variant = b) + __end__([

__end__

]):::last + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_graph[mongodb].3 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableAssign" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + }, + "metadata": { + "parents": {}, + "version": 2, + "variant": "b" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_graph[mongodb].4 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  
exit  .-> __end__; + + ''' +# --- +# name: test_conditional_graph[mongodb].5 + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'agent', + }), + dict({ + 'source': 'tools', + 'target': 'agent', + }), + dict({ + 'conditional': True, + 'data': 'continue', + 'source': 'agent', + 'target': 'tools', + }), + dict({ + 'conditional': True, + 'data': 'exit', + 'source': 'agent', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langchain', + 'schema', + 'runnable', + 'RunnableAssign', + ]), + 'name': 'agent', + }), + 'id': 'agent', + 'metadata': dict({ + '__interrupt': 'after', + }), + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tools', + }), + 'id': 'tools', + 'metadata': dict({ + 'parents': dict({ + }), + 'variant': 'b', + 'version': 2, + }), + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_conditional_graph[mongodb].6 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent
__interrupt = after) + tools(tools
parents = {} + version = 2 + variant = b) + __end__([

__end__

]):::last + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_conditional_state_graph[mongodb] + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_conditional_state_graph[mongodb].1 + '{"$defs": {"AgentAction": {"description": "Represents a 
request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_conditional_state_graph[mongodb].2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langchain", + "schema", + "runnable", + "RunnableSequence" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "tools" + } + 
}, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "exit", + "conditional": true + } + ] + } + ''' +# --- +# name: test_conditional_state_graph[mongodb].3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  exit  .-> __end__; + + ''' +# --- +# name: test_conditional_state_graph_with_list_edge_inputs + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "A", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "A" + } + }, + { + "id": "B", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "B" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "A", + "target": "__end__" + }, + { + "source": "B", + "target": "__end__" + }, + { + "source": "__start__", + "target": "A" + }, + { + "source": "__start__", + "target": "B" + } + ] + } + ''' +# --- +# name: test_conditional_state_graph_with_list_edge_inputs.1 + ''' + graph TD; + A --> __end__; + B --> __end__; + __start__ --> A; + __start__ --> B; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query --> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; 
+ retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[mongodb].1 + dict({ + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[mongodb].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 
'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[mongodb] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_message_graph[mongodb] + '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": 
"Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. 
Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata 
for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' +# --- +# name: test_message_graph[mongodb].1 + '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": 
"string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, 
"usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for 
passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* 
need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. 
Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata 
for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' +# --- +# name: test_message_graph[mongodb].2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "integration_tests", + "test_pregel", + "FakeFuntionChatModel" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "prebuilt", + "tool_node", + "ToolNode" + ], + "name": "tools" + } + }, + { + "id": 
"__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "data": "continue", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "data": "end", + "conditional": true + } + ] + } + ''' +# --- +# name: test_message_graph[mongodb].3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.  continue  .-> tools; + agent -.  end  .-> __end__; + + ''' +# --- +# name: test_multiple_sinks_subgraphs + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + uno(uno) + dos(dos) + subgraph_one(one) + subgraph_two(two) + subgraph_three(three) + __start__ --> uno; + uno -.-> dos; + uno -.-> subgraph_one; + subgraph subgraph + subgraph_one -.-> subgraph_two; + subgraph_one -.-> subgraph_three; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_nested_graph + ''' + graph TD; + __start__ --> inner; + inner --> side; + side --> __end__; + + ''' +# --- +# name: test_nested_graph.1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + inner(inner) + side(side) + __end__([

__end__

]):::last + __start__ --> inner; + inner --> side; + side --> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_nested_graph_xray + dict({ + 'edges': list([ + dict({ + 'conditional': True, + 'source': 'tool_two:__start__', + 'target': 'tool_two:tool_two_slow', + }), + dict({ + 'source': 'tool_two:tool_two_slow', + 'target': 'tool_two:__end__', + }), + dict({ + 'conditional': True, + 'source': 'tool_two:__start__', + 'target': 'tool_two:tool_two_fast', + }), + dict({ + 'source': 'tool_two:tool_two_fast', + 'target': 'tool_two:__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_one', + }), + dict({ + 'source': 'tool_one', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_two:__start__', + }), + dict({ + 'source': 'tool_two:__end__', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_three', + }), + dict({ + 'source': 'tool_three', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_one', + }), + 'id': 'tool_one', + 'type': 'runnable', + }), + dict({ + 'data': 'tool_two:__start__', + 'id': 'tool_two:__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_two:tool_two_slow', + }), + 'id': 'tool_two:tool_two_slow', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_two:tool_two_fast', + }), + 'id': 'tool_two:tool_two_fast', + 'type': 'runnable', + }), + dict({ + 'data': 'tool_two:__end__', + 'id': 'tool_two:__end__', + 
'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_three', + }), + 'id': 'tool_three', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_nested_graph_xray.1 + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + tool_one(tool_one) + tool_two___start__(

__start__

) + tool_two_tool_two_slow(tool_two_slow) + tool_two_tool_two_fast(tool_two_fast) + tool_two___end__(

__end__

) + tool_three(tool_three) + __end__([

__end__

]):::last + __start__ -.-> tool_one; + tool_one --> __end__; + __start__ -.-> tool_two___start__; + tool_two___end__ --> __end__; + __start__ -.-> tool_three; + tool_three --> __end__; + subgraph tool_two + tool_two___start__ -.-> tool_two_tool_two_slow; + tool_two_tool_two_slow --> tool_two___end__; + tool_two___start__ -.-> tool_two_tool_two_fast; + tool_two_tool_two_fast --> tool_two___end__; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_prebuilt_tool_chat + '{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_prebuilt_tool_chat.1 + '{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response 
Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphOutput", "type": "object"}' +# --- +# name: test_prebuilt_tool_chat.2 + ''' + { + "nodes": [ + { + "id": "__start__", + "type": "schema", + "data": "__start__" + }, + { + "id": "agent", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "utils", + "runnable", + "RunnableCallable" + ], + "name": "agent" + } + }, + { + "id": "tools", + "type": "runnable", + "data": { + "id": [ + "langgraph", + "prebuilt", + "tool_node", + "ToolNode" + ], + "name": "tools" + } + }, + { + "id": "__end__", + "type": "schema", + "data": "__end__" + } + ], + "edges": [ + { + "source": "__start__", + "target": "agent" + }, + { + "source": "tools", + "target": "agent" + }, + { + "source": "agent", + "target": "tools", + "conditional": true + }, + { + "source": "agent", + "target": "__end__", + "conditional": true + } + ] + } + ''' +# --- +# name: test_prebuilt_tool_chat.3 + ''' + graph TD; + __start__ --> agent; + tools --> agent; + agent -.-> tools; + agent -.-> __end__; + + ''' +# --- +# name: test_repeat_condition + ''' + graph TD; + __start__ --> Researcher; + Researcher -.  continue  .-> Chart_Generator; + Researcher -.  call_tool  .-> Call_Tool; + Researcher -.  end  .-> __end__; + Chart_Generator -.  continue  .-> Researcher; + Chart_Generator -.  call_tool  .-> Call_Tool; + Chart_Generator -.  end  .-> __end__; + Call_Tool -.-> Researcher; + Call_Tool -.-> Chart_Generator; + Researcher -.  
redo  .-> Researcher; + + ''' +# --- +# name: test_send_react_interrupt_control[mongodb] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent) + foo([foo]):::last + __start__ --> agent; + agent -.-> foo; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_simple_multi_edge + ''' + graph TD; + __start__ --> up; + down --> __end__; + side --> down; + up --> down; + up --> other; + up --> side; + + ''' +# --- +# name: test_start_branch_then[mongodb] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + tool_two_slow(tool_two_slow) + tool_two_fast(tool_two_fast) + __end__([

__end__

]):::last + __start__ -.-> tool_two_slow; + tool_two_slow --> __end__; + __start__ -.-> tool_two_fast; + tool_two_fast --> __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_state_graph_w_config_inherited_state_keys + '{"$defs": {"Configurable": {"properties": {"tools": {"default": null, "items": {"type": "string"}, "title": "Tools", "type": "array"}}, "title": "Configurable", "type": "object"}}, "properties": {"configurable": {"$ref": "#/$defs/Configurable", "default": null}}, "title": "LangGraphConfig", "type": "object"}' +# --- +# name: test_state_graph_w_config_inherited_state_keys.1 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent 
Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_state_graph_w_config_inherited_state_keys.2 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphOutput", "type": "object"}' +# --- +# name: 
test_weather_subgraph[mongodb] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + router_node(router_node) + normal_llm_node(normal_llm_node) + weather_graph_model_node(model_node) + weather_graph_weather_node(weather_node
__interrupt = before) + __end__([

__end__

]):::last + __start__ --> router_node; + normal_llm_node --> __end__; + weather_graph_weather_node --> __end__; + router_node -.-> normal_llm_node; + router_node -.-> weather_graph_model_node; + router_node -.-> __end__; + subgraph weather_graph + weather_graph_model_node --> weather_graph_weather_node; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_xray_bool + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + gp_one(gp_one) + gp_two___start__(

__start__

) + gp_two_p_one(p_one) + gp_two_p_two___start__(

__start__

) + gp_two_p_two_c_one(c_one) + gp_two_p_two_c_two(c_two) + gp_two_p_two___end__(

__end__

) + gp_two___end__(

__end__

) + __end__([

__end__

]):::last + __start__ --> gp_one; + gp_two___end__ --> gp_one; + gp_one -.  0  .-> gp_two___start__; + gp_one -.  1  .-> __end__; + subgraph gp_two + gp_two___start__ --> gp_two_p_one; + gp_two_p_two___end__ --> gp_two_p_one; + gp_two_p_one -.  0  .-> gp_two_p_two___start__; + gp_two_p_one -.  1  .-> gp_two___end__; + subgraph p_two + gp_two_p_two___start__ --> gp_two_p_two_c_one; + gp_two_p_two_c_two --> gp_two_p_two_c_one; + gp_two_p_two_c_one -.  0  .-> gp_two_p_two_c_two; + gp_two_p_two_c_one -.  1  .-> gp_two_p_two___end__; + end + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_xray_issue + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + p_one(p_one) + p_two___start__(

__start__

) + p_two_c_one(c_one) + p_two_c_two(c_two) + p_two___end__(

__end__

) + __end__([

__end__

]):::last + __start__ --> p_one; + p_two___end__ --> p_one; + p_one -.  0  .-> p_two___start__; + p_one -.  1  .-> __end__; + subgraph p_two + p_two___start__ --> p_two_c_one; + p_two_c_two --> p_two_c_one; + p_two_c_one -.  0  .-> p_two_c_two; + p_two_c_one -.  1  .-> p_two___end__; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_xray_lance + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'ask_question', + }), + dict({ + 'source': 'ask_question', + 'target': 'answer_question', + }), + dict({ + 'conditional': True, + 'source': 'answer_question', + 'target': 'ask_question', + }), + dict({ + 'conditional': True, + 'source': 'answer_question', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'ask_question', + }), + 'id': 'ask_question', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'answer_question', + }), + 'id': 'answer_question', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_xray_lance.1 + dict({ + 'edges': list([ + dict({ + 'source': '__start__', + 'target': 'generate_analysts', + }), + dict({ + 'source': 'conduct_interview', + 'target': 'generate_sections', + }), + dict({ + 'source': 'generate_sections', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': 'generate_analysts', + 'target': 'conduct_interview', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 
'generate_analysts', + }), + 'id': 'generate_analysts', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'graph', + 'state', + 'CompiledStateGraph', + ]), + 'name': 'conduct_interview', + }), + 'id': 'conduct_interview', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'generate_sections', + }), + 'id': 'generate_sections', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_xray_lance.2 + dict({ + 'edges': list([ + dict({ + 'source': 'conduct_interview:__start__', + 'target': 'conduct_interview:ask_question', + }), + dict({ + 'source': 'conduct_interview:ask_question', + 'target': 'conduct_interview:answer_question', + }), + dict({ + 'conditional': True, + 'source': 'conduct_interview:answer_question', + 'target': 'conduct_interview:ask_question', + }), + dict({ + 'conditional': True, + 'source': 'conduct_interview:answer_question', + 'target': 'conduct_interview:__end__', + }), + dict({ + 'source': '__start__', + 'target': 'generate_analysts', + }), + dict({ + 'source': 'conduct_interview:__end__', + 'target': 'generate_sections', + }), + dict({ + 'source': 'generate_sections', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': 'generate_analysts', + 'target': 'conduct_interview:__start__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'generate_analysts', + }), + 'id': 'generate_analysts', + 'type': 'runnable', + }), + dict({ + 'data': 'conduct_interview:__start__', + 'id': 'conduct_interview:__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 
'conduct_interview:ask_question', + }), + 'id': 'conduct_interview:ask_question', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'conduct_interview:answer_question', + }), + 'id': 'conduct_interview:answer_question', + 'type': 'runnable', + }), + dict({ + 'data': 'conduct_interview:__end__', + 'id': 'conduct_interview:__end__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'generate_sections', + }), + 'id': 'generate_sections', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__snapshots__/test_pregel_async.ambr b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__snapshots__/test_pregel_async.ambr new file mode 100644 index 0000000..83172f1 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/__snapshots__/test_pregel_async.ambr @@ -0,0 +1,162 @@ +# serializer version: 1 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb_aio] + ''' + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; + + ''' +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb_aio].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'answer': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Answer', + }), 
+ 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + 'docs', + ]), + 'title': 'State', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[mongodb_aio].2 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'answer': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Answer', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + 'docs', + ]), + 'title': 'State', + 'type': 'object', + }) +# --- +# name: test_send_react_interrupt_control[mongodb_aio] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + agent(agent) + foo([foo]):::last + __start__ --> agent; + agent -.-> foo; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- +# name: test_weather_subgraph[mongodb_aio] + ''' + %%{init: {'flowchart': {'curve': 'linear'}}}%% + graph TD; + __start__([

__start__

]):::first + router_node(router_node) + normal_llm_node(normal_llm_node) + weather_graph_model_node(model_node) + weather_graph_weather_node(weather_node
__interrupt = before) + __end__([

__end__

]):::last + __start__ --> router_node; + normal_llm_node --> __end__; + weather_graph_weather_node --> __end__; + router_node -.-> normal_llm_node; + router_node -.-> weather_graph_model_node; + router_node -.-> __end__; + subgraph weather_graph + weather_graph_model_node --> weather_graph_weather_node; + end + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc + + ''' +# --- diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/any_str.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/any_str.py new file mode 100644 index 0000000..5643a00 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/any_str.py @@ -0,0 +1,86 @@ +import re +from typing import Any, Sequence, Union + +from typing_extensions import Self + + +class FloatBetween(float): + def __new__(cls, min_value: float, max_value: float) -> Self: + return super().__new__(cls, min_value) + + def __init__(self, min_value: float, max_value: float) -> None: + super().__init__() + self.min_value = min_value + self.max_value = max_value + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, float) + and other >= self.min_value + and other <= self.max_value + ) + + def __hash__(self) -> int: + return hash((float(self), self.min_value, self.max_value)) + + +class AnyStr(str): + def __init__(self, prefix: Union[str, re.Pattern] = "") -> None: + super().__init__() + self.prefix = prefix + + def __eq__(self, other: object) -> bool: + return isinstance(other, str) and ( + other.startswith(self.prefix) + if isinstance(self.prefix, str) + else self.prefix.match(other) + ) + + def __hash__(self) -> int: + return hash((str(self), self.prefix)) + + +class AnyDict(dict): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, dict) or len(self) != len(other): + return False + for k, v in self.items(): 
+ if kk := next((kk for kk in other if kk == k), None): + if v == other[kk]: + continue + else: + return False + else: + return True + + +class AnyVersion: + def __init__(self) -> None: + super().__init__() + + def __eq__(self, other: object) -> bool: + return isinstance(other, (str, int, float)) + + def __hash__(self) -> int: + return hash(str(self)) + + +class UnsortedSequence: + def __init__(self, *values: Any) -> None: + self.seq = values + + def __eq__(self, value: object) -> bool: + return ( + isinstance(value, Sequence) + and len(self.seq) == len(value) + and all(a in value for a in self.seq) + ) + + def __hash__(self) -> int: + return hash(frozenset(self.seq)) + + def __repr__(self) -> str: + return repr(self.seq) diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/compose-postgres.yml b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/compose-postgres.yml new file mode 100644 index 0000000..221b35d --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/compose-postgres.yml @@ -0,0 +1,17 @@ +name: langgraph-tests +services: + postgres-test: + image: postgres:16 + ports: + - "5442:5432" + environment: + POSTGRES_DB: postgres + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + healthcheck: + test: pg_isready -U postgres + start_period: 10s + timeout: 1s + retries: 5 + interval: 60s + start_interval: 1s diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/conftest.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/conftest.py new file mode 100644 index 0000000..7ebee89 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/conftest.py @@ -0,0 +1,119 @@ +import os +from contextlib import asynccontextmanager +from typing import AsyncIterator, Optional +from uuid import UUID + +import pytest +from langchain_core import __version__ as core_version +from packaging import version +from pytest_mock import MockerFixture + +from langgraph.checkpoint.base import 
BaseCheckpointSaver +from langgraph.checkpoint.mongodb import MongoDBSaver +from langgraph.checkpoint.mongodb.aio import AsyncMongoDBSaver +from langgraph.store.base import BaseStore +from langgraph.store.memory import InMemoryStore + +pytest.register_assert_rewrite("tests.memory_assert") + +DEFAULT_POSTGRES_URI = "postgres://postgres:postgres@localhost:5442/" +# TODO: fix this once core is released +IS_LANGCHAIN_CORE_030_OR_GREATER = version.parse(core_version) >= version.parse( + "0.3.0.dev0" +) +SHOULD_CHECK_SNAPSHOTS = IS_LANGCHAIN_CORE_030_OR_GREATER + + +@pytest.fixture +def anyio_backend(): + return "asyncio" + + +@pytest.fixture() +def deterministic_uuids(mocker: MockerFixture) -> MockerFixture: + side_effect = ( + UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000) + ) + return mocker.patch("uuid.uuid4", side_effect=side_effect) + + +# checkpointer fixtures + + +@pytest.fixture(scope="function") +def checkpointer_memory(): + from .memory_assert import MemorySaverAssertImmutable + + yield MemorySaverAssertImmutable() + + +@pytest.fixture +def checkpointer_mongodb(): + """Fresh checkpointer without any memories.""" + with MongoDBSaver.from_conn_string( + os.environ.get("MONGODB_URI", "mongodb://localhost:27017"), + os.environ.get("DATABASE_NAME", "langchain_checkpoints_db"), + ) as checkpointer: + checkpointer.checkpoint_collection.delete_many({}) + checkpointer.writes_collection.delete_many({}) + yield checkpointer + + +@asynccontextmanager +async def _checkpointer_mongodb_aio(): + async with AsyncMongoDBSaver.from_conn_string( + os.environ.get("MONGODB_URI", "mongodb://localhost:27017"), + os.environ.get("DATABASE_NAME", "langchain_checkpoints_db"), + ) as checkpointer: + await checkpointer.checkpoint_collection.delete_many({}) + await checkpointer.writes_collection.delete_many({}) + yield checkpointer + + +@asynccontextmanager +async def awith_checkpointer( + checkpointer_name: Optional[str], +) -> AsyncIterator[BaseCheckpointSaver]: 
+ if checkpointer_name is None: + yield None + elif checkpointer_name == "memory": + from .memory_assert import MemorySaverAssertImmutable + + yield MemorySaverAssertImmutable() + elif checkpointer_name == "mongodb_aio": + async with _checkpointer_mongodb_aio() as checkpointer: + yield checkpointer + else: + raise NotImplementedError(f"Unknown checkpointer: {checkpointer_name}") + + +@pytest.fixture(scope="function") +def store_in_memory(): + yield InMemoryStore() + + +@asynccontextmanager +async def awith_store(store_name: Optional[str]) -> AsyncIterator[BaseStore]: + if store_name is None: + yield None + elif store_name == "in_memory": + yield InMemoryStore() + else: + raise NotImplementedError(f"Unknown store {store_name}") + + +ALL_CHECKPOINTERS_SYNC = [ + # "memory", + "mongodb", +] +ALL_CHECKPOINTERS_ASYNC = [ + # "memory", + "mongodb_aio", +] + +ALL_CHECKPOINTERS_ASYNC_PLUS_NONE = [ + *ALL_CHECKPOINTERS_ASYNC, + None, +] +ALL_STORES_SYNC = ["in_memory"] +ALL_STORES_ASYNC = ["in_memory"] diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/fake_chat.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/fake_chat.py new file mode 100644 index 0000000..c2a6b9b --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/fake_chat.py @@ -0,0 +1,86 @@ +import re +from typing import Any, Iterator, List, Optional, cast + +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.language_models.fake_chat_models import GenericFakeChatModel +from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult + + +class FakeChatModel(GenericFakeChatModel): + messages: list[BaseMessage] + + i: int = 0 + + def bind_tools(self, functions: list): + return self + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + 
**kwargs: Any, + ) -> ChatResult: + """Top Level call""" + if self.i >= len(self.messages): + self.i = 0 + message = self.messages[self.i] + self.i += 1 + if isinstance(message, str): + message_ = AIMessage(content=message) + else: + if hasattr(message, "model_copy"): + message_ = message.model_copy() + else: + message_ = message.copy() + generation = ChatGeneration(message=message_) + return ChatResult(generations=[generation]) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + """Stream the output of the model.""" + chat_result = self._generate( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + if not isinstance(chat_result, ChatResult): + raise ValueError( + f"Expected generate to return a ChatResult, " + f"but got {type(chat_result)} instead." + ) + + message = chat_result.generations[0].message + + if not isinstance(message, AIMessage): + raise ValueError( + f"Expected invoke to return an AIMessage, " + f"but got {type(message)} instead." + ) + + content = message.content + + if content: + # Use a regular expression to split on whitespace with a capture group + # so that we can preserve the whitespace in the output. 
+ assert isinstance(content, str) + content_chunks = cast(list[str], re.split(r"(\s)", content)) + + for token in content_chunks: + chunk = ChatGenerationChunk( + message=AIMessageChunk(content=token, id=message.id) + ) + if run_manager: + run_manager.on_llm_new_token(token, chunk=chunk) + yield chunk + else: + args = message.__dict__ + args.pop("type") + chunk = ChatGenerationChunk(message=AIMessageChunk(**args)) + if run_manager: + run_manager.on_llm_new_token("", chunk=chunk) + yield chunk diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/fake_tracer.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/fake_tracer.py new file mode 100644 index 0000000..28ecc88 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/fake_tracer.py @@ -0,0 +1,91 @@ +from typing import Any, Optional +from uuid import UUID + +from langchain_core.messages.base import BaseMessage +from langchain_core.outputs.chat_generation import ChatGeneration +from langchain_core.outputs.llm_result import LLMResult +from langchain_core.tracers import BaseTracer, Run + + +class FakeTracer(BaseTracer): + """Fake tracer that records LangChain execution. 
+ It replaces run ids with deterministic UUIDs for snapshotting.""" + + def __init__(self) -> None: + """Initialize the tracer.""" + super().__init__() + self.runs: list[Run] = [] + self.uuids_map: dict[UUID, UUID] = {} + self.uuids_generator = ( + UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000) + ) + + def _replace_uuid(self, uuid: UUID) -> UUID: + if uuid not in self.uuids_map: + self.uuids_map[uuid] = next(self.uuids_generator) + return self.uuids_map[uuid] + + def _replace_message_id(self, maybe_message: Any) -> Any: + if isinstance(maybe_message, BaseMessage): + maybe_message.id = str(next(self.uuids_generator)) + if isinstance(maybe_message, ChatGeneration): + maybe_message.message.id = str(next(self.uuids_generator)) + if isinstance(maybe_message, LLMResult): + for i, gen_list in enumerate(maybe_message.generations): + for j, gen in enumerate(gen_list): + maybe_message.generations[i][j] = self._replace_message_id(gen) + if isinstance(maybe_message, dict): + for k, v in maybe_message.items(): + maybe_message[k] = self._replace_message_id(v) + if isinstance(maybe_message, list): + for i, v in enumerate(maybe_message): + maybe_message[i] = self._replace_message_id(v) + + return maybe_message + + def _copy_run(self, run: Run) -> Run: + if run.dotted_order: + levels = run.dotted_order.split(".") + processed_levels = [] + for level in levels: + timestamp, run_id = level.split("Z") + new_run_id = self._replace_uuid(UUID(run_id)) + processed_level = f"{timestamp}Z{new_run_id}" + processed_levels.append(processed_level) + new_dotted_order = ".".join(processed_levels) + else: + new_dotted_order = None + return run.copy( + update={ + "id": self._replace_uuid(run.id), + "parent_run_id": ( + self.uuids_map[run.parent_run_id] if run.parent_run_id else None + ), + "child_runs": [self._copy_run(child) for child in run.child_runs], + "trace_id": self._replace_uuid(run.trace_id) if run.trace_id else None, + "dotted_order": new_dotted_order, + 
"inputs": self._replace_message_id(run.inputs), + "outputs": self._replace_message_id(run.outputs), + } + ) + + def _persist_run(self, run: Run) -> None: + """Persist a run.""" + + self.runs.append(self._copy_run(run)) + + def flattened_runs(self) -> list[Run]: + q = [] + self.runs + result = [] + while q: + parent = q.pop() + result.append(parent) + if parent.child_runs: + q.extend(parent.child_runs) + return result + + @property + def run_ids(self) -> list[Optional[UUID]]: + runs = self.flattened_runs() + uuids_map = {v: k for k, v in self.uuids_map.items()} + return [uuids_map.get(r.id) for r in runs] diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/memory_assert.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/memory_assert.py new file mode 100644 index 0000000..6b44051 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/memory_assert.py @@ -0,0 +1,127 @@ +import asyncio +from collections import defaultdict +from typing import Any, Optional + +from langchain_core.runnables import RunnableConfig + +from langgraph.checkpoint.base import ( + ChannelVersions, + Checkpoint, + CheckpointMetadata, + CheckpointTuple, + SerializerProtocol, + copy_checkpoint, +) +from langgraph.checkpoint.memory import MemorySaver + + +class NoopSerializer(SerializerProtocol): + def loads_typed(self, data: tuple[str, bytes]) -> Any: + return data[1] + + def dumps_typed(self, obj: Any) -> tuple[str, bytes]: + return "type", obj + + +class MemorySaverAssertImmutable(MemorySaver): + storage_for_copies: defaultdict[str, dict[str, dict[str, Checkpoint]]] + + def __init__( + self, + *, + serde: Optional[SerializerProtocol] = None, + put_sleep: Optional[float] = None, + ) -> None: + super().__init__(serde=serde) + self.storage_for_copies = defaultdict(lambda: defaultdict(dict)) + self.put_sleep = put_sleep + + def put( + self, + config: dict, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + 
) -> None: + if self.put_sleep: + import time + + time.sleep(self.put_sleep) + # assert checkpoint hasn't been modified since last written + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"]["checkpoint_ns"] + if saved := super().get(config): + assert ( + self.serde.loads_typed( + self.storage_for_copies[thread_id][checkpoint_ns][saved["id"]] + ) + == saved + ) + self.storage_for_copies[thread_id][checkpoint_ns][checkpoint["id"]] = ( + self.serde.dumps_typed(copy_checkpoint(checkpoint)) + ) + # call super to write checkpoint + return super().put(config, checkpoint, metadata, new_versions) + + +class MemorySaverAssertCheckpointMetadata(MemorySaver): + """This custom checkpointer is for verifying that a run's configurable + fields are merged with the previous checkpoint config for each step in + the run. This is the desired behavior. Because the checkpointer's (a)put() + method is called for each step, the implementation of this checkpointer + should produce a side effect that can be asserted. + """ + + def put( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> None: + """The implementation of put() merges config["configurable"] (a run's + configurable fields) with the metadata field. The state of the + checkpoint metadata can be asserted to confirm that the run's + configurable fields were merged with the previous checkpoint config. 
+ """ + configurable = config["configurable"].copy() + + # remove checkpoint_id to make testing simpler + checkpoint_id = configurable.pop("checkpoint_id", None) + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"]["checkpoint_ns"] + self.storage[thread_id][checkpoint_ns].update( + { + checkpoint["id"]: ( + self.serde.dumps_typed(checkpoint), + # merge configurable fields and metadata + self.serde.dumps_typed({**configurable, **metadata}), + checkpoint_id, + ) + } + ) + return { + "configurable": { + "thread_id": config["configurable"]["thread_id"], + "checkpoint_id": checkpoint["id"], + } + } + + async def aput( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + return await asyncio.get_running_loop().run_in_executor( + None, self.put, config, checkpoint, metadata, new_versions + ) + + +class MemorySaverNoPending(MemorySaver): + def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + result = super().get_tuple(config) + if result: + return CheckpointTuple(result.config, result.checkpoint, result.metadata) + return result diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/messages.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/messages.py new file mode 100644 index 0000000..62f5620 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/messages.py @@ -0,0 +1,50 @@ +"""Redefined messages as a work-around for pydantic issue with AnyStr. + +The code below creates version of pydantic models +that will work in unit tests with AnyStr as id field +Please note that the `id` field is assigned AFTER the model is created +to workaround an issue with pydantic ignoring the __eq__ method on +subclassed strings. 
+""" + +from typing import Any + +from langchain_core.documents import Document +from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage, ToolMessage + +from .any_str import AnyStr + + +def _AnyIdDocument(**kwargs: Any) -> Document: + """Create a document with an id field.""" + message = Document(**kwargs) + message.id = AnyStr() + return message + + +def _AnyIdAIMessage(**kwargs: Any) -> AIMessage: + """Create ai message with an any id field.""" + message = AIMessage(**kwargs) + message.id = AnyStr() + return message + + +def _AnyIdAIMessageChunk(**kwargs: Any) -> AIMessageChunk: + """Create ai message with an any id field.""" + message = AIMessageChunk(**kwargs) + message.id = AnyStr() + return message + + +def _AnyIdHumanMessage(**kwargs: Any) -> HumanMessage: + """Create a human message with an any id field.""" + message = HumanMessage(**kwargs) + message.id = AnyStr() + return message + + +def _AnyIdToolMessage(**kwargs: Any) -> ToolMessage: + """Create a tool message with an any id field.""" + message = ToolMessage(**kwargs) + message.id = AnyStr() + return message diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_interruption.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_interruption.py new file mode 100644 index 0000000..2904f0d --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_interruption.py @@ -0,0 +1,90 @@ +from typing import TypedDict + +import pytest +from pytest_mock import MockerFixture + +from langgraph.graph import END, START, StateGraph + +from .conftest import ( + ALL_CHECKPOINTERS_ASYNC, + ALL_CHECKPOINTERS_SYNC, + awith_checkpointer, +) + +pytestmark = pytest.mark.anyio + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_interruption_without_state_updates( + request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture +) -> None: + """Test interruption without state updates. 
This test confirms that + interrupting doesn't require a state key having been updated in the prev step""" + + class State(TypedDict): + input: str + + def noop(_state): + pass + + builder = StateGraph(State) + builder.add_node("step_1", noop) + builder.add_node("step_2", noop) + builder.add_node("step_3", noop) + builder.add_edge(START, "step_1") + builder.add_edge("step_1", "step_2") + builder.add_edge("step_2", "step_3") + builder.add_edge("step_3", END) + + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + graph = builder.compile(checkpointer=checkpointer, interrupt_after="*") + + initial_input = {"input": "hello world"} + thread = {"configurable": {"thread_id": "1"}} + + graph.invoke(initial_input, thread, debug=True) + assert graph.get_state(thread).next == ("step_2",) + + graph.invoke(None, thread, debug=True) + assert graph.get_state(thread).next == ("step_3",) + + graph.invoke(None, thread, debug=True) + assert graph.get_state(thread).next == () + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_interruption_without_state_updates_async( + checkpointer_name: str, mocker: MockerFixture +): + """Test interruption without state updates. 
This test confirms that + interrupting doesn't require a state key having been updated in the prev step""" + + class State(TypedDict): + input: str + + async def noop(_state): + pass + + builder = StateGraph(State) + builder.add_node("step_1", noop) + builder.add_node("step_2", noop) + builder.add_node("step_3", noop) + builder.add_edge(START, "step_1") + builder.add_edge("step_1", "step_2") + builder.add_edge("step_2", "step_3") + builder.add_edge("step_3", END) + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer, interrupt_after="*") + + initial_input = {"input": "hello world"} + thread = {"configurable": {"thread_id": "1"}} + + await graph.ainvoke(initial_input, thread, debug=True) + assert (await graph.aget_state(thread)).next == ("step_2",) + + await graph.ainvoke(None, thread, debug=True) + assert (await graph.aget_state(thread)).next == ("step_3",) + + await graph.ainvoke(None, thread, debug=True) + assert (await graph.aget_state(thread)).next == () diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_prebuilt.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_prebuilt.py new file mode 100644 index 0000000..a64e449 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_prebuilt.py @@ -0,0 +1,1419 @@ +import dataclasses +import json +from functools import partial +from typing import ( + Annotated, + Any, + Callable, + Dict, + List, + Literal, + Optional, + Sequence, + Type, + TypeVar, + Union, +) + +import pytest +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.language_models import BaseChatModel, LanguageModelInput +from langchain_core.messages import ( + AIMessage, + AnyMessage, + BaseMessage, + HumanMessage, + SystemMessage, + ToolCall, + ToolMessage, +) +from langchain_core.outputs import ChatGeneration, ChatResult +from langchain_core.runnables import Runnable, RunnableLambda +from 
langchain_core.tools import BaseTool, ToolException +from langchain_core.tools import tool as dec_tool +from pydantic import BaseModel, ValidationError +from pydantic.v1 import BaseModel as BaseModelV1 +from pydantic.v1 import ValidationError as ValidationErrorV1 +from typing_extensions import TypedDict + +from langgraph.checkpoint.base import BaseCheckpointSaver +from langgraph.checkpoint.memory import MemorySaver +from langgraph.errors import NodeInterrupt +from langgraph.graph import START, MessagesState, StateGraph, add_messages +from langgraph.prebuilt import ( + ToolNode, + ValidationNode, + create_react_agent, + tools_condition, +) +from langgraph.prebuilt.chat_agent_executor import _validate_chat_history +from langgraph.prebuilt.tool_node import ( + TOOL_CALL_ERROR_TEMPLATE, + InjectedState, + InjectedStore, + _get_state_args, + _infer_handled_types, +) +from langgraph.store.base import BaseStore +from langgraph.store.memory import InMemoryStore +from langgraph.types import Interrupt + +from .conftest import ( + ALL_CHECKPOINTERS_ASYNC, + ALL_CHECKPOINTERS_SYNC, + IS_LANGCHAIN_CORE_030_OR_GREATER, + awith_checkpointer, +) +from .messages import _AnyIdHumanMessage, _AnyIdToolMessage + +pytestmark = pytest.mark.anyio + + +class FakeToolCallingModel(BaseChatModel): + tool_calls: Optional[list[list[ToolCall]]] = None + index: int = 0 + tool_style: Literal["openai", "anthropic"] = "openai" + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call""" + messages_string = "-".join([m.content for m in messages]) + tool_calls = ( + self.tool_calls[self.index % len(self.tool_calls)] + if self.tool_calls + else [] + ) + message = AIMessage( + content=messages_string, id=str(self.index), tool_calls=tool_calls.copy() + ) + self.index += 1 + return ChatResult(generations=[ChatGeneration(message=message)]) + + @property + def 
_llm_type(self) -> str: + return "fake-tool-call-model" + + def bind_tools( + self, + tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + tool_dicts = [] + for tool in tools: + if not isinstance(tool, BaseTool): + raise TypeError( + "Only BaseTool is supported by FakeToolCallingModel.bind_tools" + ) + + # NOTE: this is a simplified tool spec for testing purposes only + if self.tool_style == "openai": + tool_dicts.append( + { + "type": "function", + "function": { + "name": tool.name, + }, + } + ) + elif self.tool_style == "anthropic": + tool_dicts.append( + { + "name": tool.name, + } + ) + + return self.bind(tools=tool_dicts) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_no_modifier(request: pytest.FixtureRequest, checkpointer_name: str) -> None: + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + "checkpointer_" + checkpointer_name + ) + model = FakeToolCallingModel() + + agent = create_react_agent(model, [], checkpointer=checkpointer) + inputs = [HumanMessage("hi?")] + thread = {"configurable": {"thread_id": "123"}} + response = agent.invoke({"messages": inputs}, thread, debug=True) + expected_response = {"messages": inputs + [AIMessage(content="hi?", id="0")]} + assert response == expected_response + + if checkpointer: + saved = checkpointer.get_tuple(thread) + assert saved is not None + assert saved.checkpoint["channel_values"] == { + "messages": [ + _AnyIdHumanMessage(content="hi?"), + AIMessage(content="hi?", id="0"), + ], + "agent": "agent", + } + assert saved.metadata == { + "parents": {}, + "source": "loop", + "writes": {"agent": {"messages": [AIMessage(content="hi?", id="0")]}}, + "step": 1, + "thread_id": "123", + } + assert saved.pending_writes == [] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_no_modifier_async(checkpointer_name: str) -> None: + async with 
awith_checkpointer(checkpointer_name) as checkpointer: + model = FakeToolCallingModel() + + agent = create_react_agent(model, [], checkpointer=checkpointer) + inputs = [HumanMessage("hi?")] + thread = {"configurable": {"thread_id": "123"}} + response = await agent.ainvoke({"messages": inputs}, thread, debug=True) + expected_response = {"messages": inputs + [AIMessage(content="hi?", id="0")]} + assert response == expected_response + + if checkpointer: + saved = await checkpointer.aget_tuple(thread) + assert saved is not None + assert saved.checkpoint["channel_values"] == { + "messages": [ + _AnyIdHumanMessage(content="hi?"), + AIMessage(content="hi?", id="0"), + ], + "agent": "agent", + } + assert saved.metadata == { + "parents": {}, + "source": "loop", + "writes": {"agent": {"messages": [AIMessage(content="hi?", id="0")]}}, + "step": 1, + "thread_id": "123", + } + assert saved.pending_writes == [] + + +def test_passing_two_modifiers(): + model = FakeToolCallingModel() + with pytest.raises(ValueError): + create_react_agent(model, [], messages_modifier="Foo", state_modifier="Bar") + + +def test_system_message_modifier(): + messages_modifier = SystemMessage(content="Foo") + agent_1 = create_react_agent( + FakeToolCallingModel(), [], messages_modifier=messages_modifier + ) + agent_2 = create_react_agent( + FakeToolCallingModel(), [], state_modifier=messages_modifier + ) + for agent in [agent_1, agent_2]: + inputs = [HumanMessage("hi?")] + response = agent.invoke({"messages": inputs}) + expected_response = { + "messages": inputs + [AIMessage(content="Foo-hi?", id="0", tool_calls=[])] + } + assert response == expected_response + + +def test_system_message_string_modifier(): + messages_modifier = "Foo" + agent_1 = create_react_agent( + FakeToolCallingModel(), [], messages_modifier=messages_modifier + ) + agent_2 = create_react_agent( + FakeToolCallingModel(), [], state_modifier=messages_modifier + ) + for agent in [agent_1, agent_2]: + inputs = [HumanMessage("hi?")] + 
response = agent.invoke({"messages": inputs}) + expected_response = { + "messages": inputs + [AIMessage(content="Foo-hi?", id="0", tool_calls=[])] + } + assert response == expected_response + + +def test_callable_messages_modifier(): + model = FakeToolCallingModel() + + def messages_modifier(messages): + modified_message = f"Bar {messages[-1].content}" + return [HumanMessage(content=modified_message)] + + agent = create_react_agent(model, [], messages_modifier=messages_modifier) + inputs = [HumanMessage("hi?")] + response = agent.invoke({"messages": inputs}) + expected_response = {"messages": inputs + [AIMessage(content="Bar hi?", id="0")]} + assert response == expected_response + + +def test_callable_state_modifier(): + model = FakeToolCallingModel() + + def state_modifier(state): + modified_message = f"Bar {state['messages'][-1].content}" + return [HumanMessage(content=modified_message)] + + agent = create_react_agent(model, [], state_modifier=state_modifier) + inputs = [HumanMessage("hi?")] + response = agent.invoke({"messages": inputs}) + expected_response = {"messages": inputs + [AIMessage(content="Bar hi?", id="0")]} + assert response == expected_response + + +def test_runnable_messages_modifier(): + model = FakeToolCallingModel() + + messages_modifier = RunnableLambda( + lambda messages: [HumanMessage(content=f"Baz {messages[-1].content}")] + ) + + agent = create_react_agent(model, [], messages_modifier=messages_modifier) + inputs = [HumanMessage("hi?")] + response = agent.invoke({"messages": inputs}) + expected_response = {"messages": inputs + [AIMessage(content="Baz hi?", id="0")]} + assert response == expected_response + + +def test_runnable_state_modifier(): + model = FakeToolCallingModel() + + state_modifier = RunnableLambda( + lambda state: [HumanMessage(content=f"Baz {state['messages'][-1].content}")] + ) + + agent = create_react_agent(model, [], state_modifier=state_modifier) + inputs = [HumanMessage("hi?")] + response = agent.invoke({"messages": 
inputs}) + expected_response = {"messages": inputs + [AIMessage(content="Baz hi?", id="0")]} + assert response == expected_response + + +def test_state_modifier_with_store(): + def add(a: int, b: int): + """Adds a and b""" + return a + b + + in_memory_store = InMemoryStore() + in_memory_store.put(("memories", "1"), "user_name", {"data": "User name is Alice"}) + in_memory_store.put(("memories", "2"), "user_name", {"data": "User name is Bob"}) + + def modify(state, config, *, store): + user_id = config["configurable"]["user_id"] + system_str = store.get(("memories", user_id), "user_name").value["data"] + return [SystemMessage(system_str)] + state["messages"] + + def modify_no_store(state, config): + return SystemMessage("foo") + state["messages"] + + model = FakeToolCallingModel() + + # test state modifier that uses store works + agent = create_react_agent( + model, [add], state_modifier=modify, store=in_memory_store + ) + response = agent.invoke( + {"messages": [("user", "hi")]}, {"configurable": {"user_id": "1"}} + ) + assert response["messages"][-1].content == "User name is Alice-hi" + + # test state modifier that doesn't use store works + agent = create_react_agent( + model, [add], state_modifier=modify_no_store, store=in_memory_store + ) + response = agent.invoke( + {"messages": [("user", "hi")]}, {"configurable": {"user_id": "2"}} + ) + assert response["messages"][-1].content == "foo-hi" + + +@pytest.mark.parametrize("tool_style", ["openai", "anthropic"]) +def test_model_with_tools(tool_style: str): + model = FakeToolCallingModel(tool_style=tool_style) + + @dec_tool + def tool1(some_val: int) -> str: + """Tool 1 docstring.""" + return f"Tool 1: {some_val}" + + @dec_tool + def tool2(some_val: int) -> str: + """Tool 2 docstring.""" + return f"Tool 2: {some_val}" + + # check valid agent constructor + agent = create_react_agent(model.bind_tools([tool1, tool2]), [tool1, tool2]) + result = agent.nodes["tools"].invoke( + { + "messages": [ + AIMessage( + "hi?", + 
tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 2}, + "id": "some 1", + }, + { + "name": "tool2", + "args": {"some_val": 2}, + "id": "some 2", + }, + ], + ) + ] + } + ) + tool_messages: ToolMessage = result["messages"][-2:] + for tool_message in tool_messages: + assert tool_message.type == "tool" + assert tool_message.content in {"Tool 1: 2", "Tool 2: 2"} + assert tool_message.tool_call_id in {"some 1", "some 2"} + + # test mismatching tool lengths + with pytest.raises(ValueError): + create_react_agent(model.bind_tools([tool1]), [tool1, tool2]) + + # test missing bound tools + with pytest.raises(ValueError): + create_react_agent(model.bind_tools([tool1]), [tool2]) + + +def test__validate_messages(): + # empty input + _validate_chat_history([]) + + # single human message + _validate_chat_history( + [ + HumanMessage(content="What's the weather?"), + ] + ) + + # human + AI + _validate_chat_history( + [ + HumanMessage(content="What's the weather?"), + AIMessage(content="The weather is sunny and 75°F."), + ] + ) + + # Answered tool calls + _validate_chat_history( + [ + HumanMessage(content="What's the weather?"), + AIMessage( + content="Let me check that for you.", + tool_calls=[{"id": "call1", "name": "get_weather", "args": {}}], + ), + ToolMessage(content="Sunny, 75°F", tool_call_id="call1"), + AIMessage(content="The weather is sunny and 75°F."), + ] + ) + + # Unanswered tool calls + with pytest.raises(ValueError): + _validate_chat_history( + [ + AIMessage( + content="I'll check that for you.", + tool_calls=[ + {"id": "call1", "name": "get_weather", "args": {}}, + {"id": "call2", "name": "get_time", "args": {}}, + ], + ) + ] + ) + + with pytest.raises(ValueError): + _validate_chat_history( + [ + HumanMessage(content="What's the weather and time?"), + AIMessage( + content="I'll check that for you.", + tool_calls=[ + {"id": "call1", "name": "get_weather", "args": {}}, + {"id": "call2", "name": "get_time", "args": {}}, + ], + ), + ToolMessage(content="Sunny, 
75°F", tool_call_id="call1"), + AIMessage( + content="The weather is sunny and 75°F. Let me check the time." + ), + ] + ) + + +def test__infer_handled_types() -> None: + def handle(e): # type: ignore + return "" + + def handle2(e: Exception) -> str: + return "" + + def handle3(e: Union[ValueError, ToolException]) -> str: + return "" + + class Handler: + def handle(self, e: ValueError) -> str: + return "" + + handle4 = Handler().handle + + def handle5(e: Union[Union[TypeError, ValueError], ToolException]): + return "" + + expected: tuple = (Exception,) + actual = _infer_handled_types(handle) + assert expected == actual + + expected = (Exception,) + actual = _infer_handled_types(handle2) + assert expected == actual + + expected = (ValueError, ToolException) + actual = _infer_handled_types(handle3) + assert expected == actual + + expected = (ValueError,) + actual = _infer_handled_types(handle4) + assert expected == actual + + expected = (TypeError, ValueError, ToolException) + actual = _infer_handled_types(handle5) + assert expected == actual + + with pytest.raises(ValueError): + + def handler(e: str): + return "" + + _infer_handled_types(handler) + + with pytest.raises(ValueError): + + def handler(e: list[Exception]): + return "" + + _infer_handled_types(handler) + + with pytest.raises(ValueError): + + def handler(e: Union[str, int]): + return "" + + _infer_handled_types(handler) + + +# tools for testing Too +def tool1(some_val: int, some_other_val: str) -> str: + """Tool 1 docstring.""" + if some_val == 0: + raise ValueError("Test error") + return f"{some_val} - {some_other_val}" + + +async def tool2(some_val: int, some_other_val: str) -> str: + """Tool 2 docstring.""" + if some_val == 0: + raise ToolException("Test error") + return f"tool2: {some_val} - {some_other_val}" + + +async def tool3(some_val: int, some_other_val: str) -> str: + """Tool 3 docstring.""" + return [ + {"key_1": some_val, "key_2": "foo"}, + {"key_1": some_other_val, "key_2": "baz"}, + ] + + 
+async def tool4(some_val: int, some_other_val: str) -> str: + """Tool 4 docstring.""" + return [ + {"type": "image_url", "image_url": {"url": "abdc"}}, + ] + + +@dec_tool +def tool5(some_val: int): + """Tool 5 docstring.""" + raise ToolException("Test error") + + +tool5.handle_tool_error = "foo" + + +async def test_tool_node(): + result = ToolNode([tool1]).invoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 1, "some_other_val": "foo"}, + "id": "some 0", + } + ], + ) + ] + } + ) + + tool_message: ToolMessage = result["messages"][-1] + assert tool_message.type == "tool" + assert tool_message.content == "1 - foo" + assert tool_message.tool_call_id == "some 0" + + result2 = await ToolNode([tool2]).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool2", + "args": {"some_val": 2, "some_other_val": "bar"}, + "id": "some 1", + } + ], + ) + ] + } + ) + + tool_message: ToolMessage = result2["messages"][-1] + assert tool_message.type == "tool" + assert tool_message.content == "tool2: 2 - bar" + + # list of dicts tool content + result3 = await ToolNode([tool3]).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool3", + "args": {"some_val": 2, "some_other_val": "bar"}, + "id": "some 2", + } + ], + ) + ] + } + ) + tool_message: ToolMessage = result3["messages"][-1] + assert tool_message.type == "tool" + assert ( + tool_message.content + == '[{"key_1": 2, "key_2": "foo"}, {"key_1": "bar", "key_2": "baz"}]' + ) + assert tool_message.tool_call_id == "some 2" + + # list of content blocks tool content + result4 = await ToolNode([tool4]).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool4", + "args": {"some_val": 2, "some_other_val": "bar"}, + "id": "some 3", + } + ], + ) + ] + } + ) + tool_message: ToolMessage = result4["messages"][-1] + assert tool_message.type == "tool" + assert tool_message.content == [{"type": "image_url", 
"image_url": {"url": "abdc"}}] + assert tool_message.tool_call_id == "some 3" + + +async def test_tool_node_error_handling(): + def handle_all(e: Union[ValueError, ToolException, ValidationError]): + return TOOL_CALL_ERROR_TEMPLATE.format(error=repr(e)) + + # test catching all exceptions, via: + # - handle_tool_errors = True + # - passing a tuple of all exceptions + # - passing a callable with all exceptions in the signature + for handle_tool_errors in ( + True, + (ValueError, ToolException, ValidationError), + handle_all, + ): + result_error = await ToolNode( + [tool1, tool2, tool3], handle_tool_errors=handle_tool_errors + ).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 0, "some_other_val": "foo"}, + "id": "some id", + }, + { + "name": "tool2", + "args": {"some_val": 0, "some_other_val": "bar"}, + "id": "some other id", + }, + { + "name": "tool3", + "args": {"some_val": 0}, + "id": "another id", + }, + ], + ) + ] + } + ) + + assert all(m.type == "tool" for m in result_error["messages"]) + assert all(m.status == "error" for m in result_error["messages"]) + assert ( + result_error["messages"][0].content + == f"Error: {repr(ValueError('Test error'))}\n Please fix your mistakes." + ) + assert ( + result_error["messages"][1].content + == f"Error: {repr(ToolException('Test error'))}\n Please fix your mistakes." 
+ ) + assert ( + "ValidationError" in result_error["messages"][2].content + or "validation error" in result_error["messages"][2].content + ) + + assert result_error["messages"][0].tool_call_id == "some id" + assert result_error["messages"][1].tool_call_id == "some other id" + assert result_error["messages"][2].tool_call_id == "another id" + + +async def test_tool_node_error_handling_callable(): + def handle_value_error(e: ValueError): + return "Value error" + + def handle_tool_exception(e: ToolException): + return "Tool exception" + + for handle_tool_errors in ("Value error", handle_value_error): + result_error = await ToolNode( + [tool1], handle_tool_errors=handle_tool_errors + ).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 0, "some_other_val": "foo"}, + "id": "some id", + }, + ], + ) + ] + } + ) + tool_message: ToolMessage = result_error["messages"][-1] + assert tool_message.type == "tool" + assert tool_message.status == "error" + assert tool_message.content == "Value error" + + # test raising for an unhandled exception, via: + # - passing a tuple of all exceptions + # - passing a callable with all exceptions in the signature + for handle_tool_errors in ((ValueError,), handle_value_error): + with pytest.raises(ToolException) as exc_info: + await ToolNode( + [tool1, tool2], handle_tool_errors=handle_tool_errors + ).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 0, "some_other_val": "foo"}, + "id": "some id", + }, + { + "name": "tool2", + "args": {"some_val": 0, "some_other_val": "bar"}, + "id": "some other id", + }, + ], + ) + ] + } + ) + assert str(exc_info.value) == "Test error" + + for handle_tool_errors in ((ToolException,), handle_tool_exception): + with pytest.raises(ValueError) as exc_info: + await ToolNode( + [tool1, tool2], handle_tool_errors=handle_tool_errors + ).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + 
tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 0, "some_other_val": "foo"}, + "id": "some id", + }, + { + "name": "tool2", + "args": {"some_val": 0, "some_other_val": "bar"}, + "id": "some other id", + }, + ], + ) + ] + } + ) + assert str(exc_info.value) == "Test error" + + +async def test_tool_node_handle_tool_errors_false(): + with pytest.raises(ValueError) as exc_info: + ToolNode([tool1], handle_tool_errors=False).invoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 0, "some_other_val": "foo"}, + "id": "some id", + } + ], + ) + ] + } + ) + + assert str(exc_info.value) == "Test error" + + with pytest.raises(ToolException): + await ToolNode([tool2], handle_tool_errors=False).ainvoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool2", + "args": {"some_val": 0, "some_other_val": "bar"}, + "id": "some id", + } + ], + ) + ] + } + ) + + assert str(exc_info.value) == "Test error" + + # test validation errors get raised if handle_tool_errors is False + with pytest.raises((ValidationError, ValidationErrorV1)): + ToolNode([tool1], handle_tool_errors=False).invoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool1", + "args": {"some_val": 0}, + "id": "some id", + } + ], + ) + ] + } + ) + + +def test_tool_node_individual_tool_error_handling(): + # test error handling on individual tools (and that it overrides overall error handling!) 
+ result_individual_tool_error_handler = ToolNode( + [tool5], handle_tool_errors="bar" + ).invoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool5", + "args": {"some_val": 0}, + "id": "some 0", + } + ], + ) + ] + } + ) + + tool_message: ToolMessage = result_individual_tool_error_handler["messages"][-1] + assert tool_message.type == "tool" + assert tool_message.status == "error" + assert tool_message.content == "foo" + assert tool_message.tool_call_id == "some 0" + + +def test_tool_node_incorrect_tool_name(): + result_incorrect_name = ToolNode([tool1, tool2]).invoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool3", + "args": {"some_val": 1, "some_other_val": "foo"}, + "id": "some 0", + } + ], + ) + ] + } + ) + + tool_message: ToolMessage = result_incorrect_name["messages"][-1] + assert tool_message.type == "tool" + assert tool_message.status == "error" + assert ( + tool_message.content + == "Error: tool3 is not a valid tool, try one of [tool1, tool2]." 
+ ) + assert tool_message.tool_call_id == "some 0" + + +def test_tool_node_node_interrupt(): + def tool_normal(some_val: int) -> str: + """Tool docstring.""" + return "normal" + + def tool_interrupt(some_val: int) -> str: + """Tool docstring.""" + raise NodeInterrupt("foo") + + def handle(e: NodeInterrupt): + return "handled" + + for handle_tool_errors in (True, (NodeInterrupt,), "handled", handle, False): + node = ToolNode([tool_interrupt], handle_tool_errors=handle_tool_errors) + with pytest.raises(NodeInterrupt) as exc_info: + node.invoke( + { + "messages": [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": "tool_interrupt", + "args": {"some_val": 0}, + "id": "some 0", + } + ], + ) + ] + } + ) + assert exc_info.value == "foo" + + # test inside react agent + model = FakeToolCallingModel( + tool_calls=[ + [ + ToolCall(name="tool_interrupt", args={"some_val": 0}, id="1"), + ToolCall(name="tool_normal", args={"some_val": 1}, id="2"), + ], + [], + ] + ) + checkpointer = MemorySaver() + config = {"configurable": {"thread_id": "1"}} + agent = create_react_agent( + model, [tool_interrupt, tool_normal], checkpointer=checkpointer + ) + result = agent.invoke({"messages": [HumanMessage("hi?")]}, config) + assert result["messages"] == [ + _AnyIdHumanMessage( + content="hi?", + ), + AIMessage( + content="hi?", + id="0", + tool_calls=[ + { + "name": "tool_interrupt", + "args": {"some_val": 0}, + "id": "1", + "type": "tool_call", + }, + { + "name": "tool_normal", + "args": {"some_val": 1}, + "id": "2", + "type": "tool_call", + }, + ], + ), + ] + state = agent.get_state(config) + assert state.next == ("tools",) + task = state.tasks[0] + assert task.name == "tools" + assert task.interrupts == (Interrupt(value="foo", when="during"),) + + +def my_function(some_val: int, some_other_val: str) -> str: + return f"{some_val} - {some_other_val}" + + +class MyModel(BaseModel): + some_val: int + some_other_val: str + + +class MyModelV1(BaseModelV1): + some_val: int + some_other_val: str 
+ + +@dec_tool +def my_tool(some_val: int, some_other_val: str) -> str: + """Cool.""" + return f"{some_val} - {some_other_val}" + + +@pytest.mark.parametrize( + "tool_schema", + [ + my_function, + MyModel, + MyModelV1, + my_tool, + ], +) +@pytest.mark.parametrize("use_message_key", [True, False]) +async def test_validation_node(tool_schema: Any, use_message_key: bool): + validation_node = ValidationNode([tool_schema]) + tool_name = getattr(tool_schema, "name", getattr(tool_schema, "__name__", None)) + inputs = [ + AIMessage( + "hi?", + tool_calls=[ + { + "name": tool_name, + "args": {"some_val": 1, "some_other_val": "foo"}, + "id": "some 0", + }, + { + "name": tool_name, + # Wrong type for some_val + "args": {"some_val": "bar", "some_other_val": "foo"}, + "id": "some 1", + }, + ], + ), + ] + if use_message_key: + inputs = {"messages": inputs} + result = await validation_node.ainvoke(inputs) + if use_message_key: + result = result["messages"] + + def check_results(messages: list): + assert len(messages) == 2 + assert all(m.type == "tool" for m in messages) + assert not messages[0].additional_kwargs.get("is_error") + assert messages[1].additional_kwargs.get("is_error") + + check_results(result) + result_sync = validation_node.invoke(inputs) + if use_message_key: + result_sync = result_sync["messages"] + check_results(result_sync) + + +class _InjectStateSchema(TypedDict): + messages: list + foo: str + + +class _InjectedStatePydanticSchema(BaseModelV1): + messages: list + foo: str + + +class _InjectedStatePydanticV2Schema(BaseModel): + messages: list + foo: str + + +@dataclasses.dataclass +class _InjectedStateDataclassSchema: + messages: list + foo: str + + +T = TypeVar("T") + + +@pytest.mark.parametrize( + "schema_", + [ + _InjectStateSchema, + _InjectedStatePydanticSchema, + _InjectedStatePydanticV2Schema, + _InjectedStateDataclassSchema, + ], +) +def test_tool_node_inject_state(schema_: Type[T]) -> None: + def tool1(some_val: int, state: Annotated[T, InjectedState]) 
-> str: + """Tool 1 docstring.""" + if isinstance(state, dict): + return state["foo"] + else: + return state.foo + + def tool2(some_val: int, state: Annotated[T, InjectedState()]) -> str: + """Tool 2 docstring.""" + if isinstance(state, dict): + return state["foo"] + else: + return state.foo + + def tool3( + some_val: int, + foo: Annotated[str, InjectedState("foo")], + msgs: Annotated[List[AnyMessage], InjectedState("messages")], + ) -> str: + """Tool 1 docstring.""" + return foo + + def tool4( + some_val: int, msgs: Annotated[List[AnyMessage], InjectedState("messages")] + ) -> str: + """Tool 1 docstring.""" + return msgs[0].content + + node = ToolNode([tool1, tool2, tool3, tool4]) + for tool_name in ("tool1", "tool2", "tool3"): + tool_call = { + "name": tool_name, + "args": {"some_val": 1}, + "id": "some 0", + "type": "tool_call", + } + msg = AIMessage("hi?", tool_calls=[tool_call]) + result = node.invoke(schema_(**{"messages": [msg], "foo": "bar"})) + tool_message = result["messages"][-1] + assert tool_message.content == "bar", f"Failed for tool={tool_name}" + + if tool_name == "tool3": + failure_input = None + try: + failure_input = schema_(**{"messages": [msg], "notfoo": "bar"}) + except Exception: + pass + if failure_input is not None: + with pytest.raises(KeyError): + node.invoke(failure_input) + + with pytest.raises(ValueError): + node.invoke([msg]) + else: + failure_input = None + try: + failure_input = schema_(**{"messages": [msg], "notfoo": "bar"}) + except Exception: + # We'd get a validation error from pydantic state and wouldn't make it to the node + # anyway + pass + if failure_input is not None: + messages_ = node.invoke(failure_input) + tool_message = messages_["messages"][-1] + assert "KeyError" in tool_message.content + tool_message = node.invoke([msg])[-1] + assert "KeyError" in tool_message.content + + tool_call = { + "name": "tool4", + "args": {"some_val": 1}, + "id": "some 0", + "type": "tool_call", + } + msg = AIMessage("hi?", 
tool_calls=[tool_call]) + result = node.invoke(schema_(**{"messages": [msg], "foo": ""})) + tool_message = result["messages"][-1] + assert tool_message.content == "hi?" + + result = node.invoke([msg]) + tool_message = result[-1] + assert tool_message.content == "hi?" + + +@pytest.mark.skipif( + not IS_LANGCHAIN_CORE_030_OR_GREATER, + reason="Langchain core 0.3.0 or greater is required", +) +def test_tool_node_inject_store() -> None: + store = InMemoryStore() + namespace = ("test",) + + def tool1(some_val: int, store: Annotated[BaseStore, InjectedStore()]) -> str: + """Tool 1 docstring.""" + store_val = store.get(namespace, "test_key").value["foo"] + return f"Some val: {some_val}, store val: {store_val}" + + def tool2(some_val: int, store: Annotated[BaseStore, InjectedStore()]) -> str: + """Tool 2 docstring.""" + store_val = store.get(namespace, "test_key").value["foo"] + return f"Some val: {some_val}, store val: {store_val}" + + def tool3( + some_val: int, + bar: Annotated[str, InjectedState("bar")], + store: Annotated[BaseStore, InjectedStore()], + ) -> str: + """Tool 3 docstring.""" + store_val = store.get(namespace, "test_key").value["foo"] + return f"Some val: {some_val}, store val: {store_val}, state val: {bar}" + + node = ToolNode([tool1, tool2, tool3], handle_tool_errors=True) + store.put(namespace, "test_key", {"foo": "bar"}) + + class State(MessagesState): + bar: str + + builder = StateGraph(State) + builder.add_node("tools", node) + builder.add_edge(START, "tools") + graph = builder.compile(store=store) + + for tool_name in ("tool1", "tool2"): + tool_call = { + "name": tool_name, + "args": {"some_val": 1}, + "id": "some 0", + "type": "tool_call", + } + msg = AIMessage("hi?", tool_calls=[tool_call]) + node_result = node.invoke({"messages": [msg]}, store=store) + graph_result = graph.invoke({"messages": [msg]}) + for result in (node_result, graph_result): + result["messages"][-1] + tool_message = result["messages"][-1] + assert ( + tool_message.content == 
"Some val: 1, store val: bar" + ), f"Failed for tool={tool_name}" + + tool_call = { + "name": "tool3", + "args": {"some_val": 1}, + "id": "some 0", + "type": "tool_call", + } + msg = AIMessage("hi?", tool_calls=[tool_call]) + node_result = node.invoke({"messages": [msg], "bar": "baz"}, store=store) + graph_result = graph.invoke({"messages": [msg], "bar": "baz"}) + for result in (node_result, graph_result): + result["messages"][-1] + tool_message = result["messages"][-1] + assert ( + tool_message.content == "Some val: 1, store val: bar, state val: baz" + ), f"Failed for tool={tool_name}" + + # test injected store without passing store to compiled graph + failing_graph = builder.compile() + with pytest.raises(ValueError): + failing_graph.invoke({"messages": [msg], "bar": "baz"}) + + +def test_tool_node_ensure_utf8() -> None: + @dec_tool + def get_day_list(days: list[str]) -> list[str]: + """choose days""" + return days + + data = ["星期一", "水曜日", "목요일", "Friday"] + tools = [get_day_list] + tool_calls = [ToolCall(name=get_day_list.name, args={"days": data}, id="test_id")] + outputs: list[ToolMessage] = ToolNode(tools).invoke( + [AIMessage(content="", tool_calls=tool_calls)] + ) + assert outputs[0].content == json.dumps(data, ensure_ascii=False) + + +def test_tool_node_messages_key() -> None: + @dec_tool + def add(a: int, b: int): + """Adds a and b.""" + return a + b + + model = FakeToolCallingModel( + tool_calls=[[ToolCall(name=add.name, args={"a": 1, "b": 2}, id="test_id")]] + ) + + class State(TypedDict): + subgraph_messages: Annotated[list[AnyMessage], add_messages] + + def call_model(state: State): + response = model.invoke(state["subgraph_messages"]) + model.tool_calls = [] + return {"subgraph_messages": response} + + builder = StateGraph(State) + builder.add_node("agent", call_model) + builder.add_node("tools", ToolNode([add], messages_key="subgraph_messages")) + builder.add_conditional_edges( + "agent", partial(tools_condition, messages_key="subgraph_messages") + 
) + builder.add_edge(START, "agent") + builder.add_edge("tools", "agent") + + graph = builder.compile() + result = graph.invoke({"subgraph_messages": [HumanMessage(content="hi")]}) + assert result["subgraph_messages"] == [ + _AnyIdHumanMessage(content="hi"), + AIMessage( + content="hi", + id="0", + tool_calls=[ToolCall(name=add.name, args={"a": 1, "b": 2}, id="test_id")], + ), + _AnyIdToolMessage(content="3", name=add.name, tool_call_id="test_id"), + AIMessage(content="hi-hi-3", id="1"), + ] + + +async def test_return_direct() -> None: + @dec_tool(return_direct=True) + def tool_return_direct(input: str) -> str: + """A tool that returns directly.""" + return f"Direct result: {input}" + + @dec_tool + def tool_normal(input: str) -> str: + """A normal tool.""" + return f"Normal result: {input}" + + first_tool_call = [ + ToolCall( + name="tool_return_direct", + args={"input": "Test direct"}, + id="1", + ), + ] + expected_ai = AIMessage( + content="Test direct", + id="0", + tool_calls=first_tool_call, + ) + model = FakeToolCallingModel(tool_calls=[first_tool_call, []]) + agent = create_react_agent(model, [tool_return_direct, tool_normal]) + + # Test direct return for tool_return_direct + result = agent.invoke( + {"messages": [HumanMessage(content="Test direct", id="hum0")]} + ) + assert result["messages"] == [ + HumanMessage(content="Test direct", id="hum0"), + expected_ai, + ToolMessage( + content="Direct result: Test direct", + name="tool_return_direct", + tool_call_id="1", + id=result["messages"][2].id, + ), + ] + second_tool_call = [ + ToolCall( + name="tool_normal", + args={"input": "Test normal"}, + id="2", + ), + ] + model = FakeToolCallingModel(tool_calls=[second_tool_call, []]) + agent = create_react_agent(model, [tool_return_direct, tool_normal]) + result = agent.invoke( + {"messages": [HumanMessage(content="Test normal", id="hum1")]} + ) + assert result["messages"] == [ + HumanMessage(content="Test normal", id="hum1"), + AIMessage(content="Test normal", 
id="0", tool_calls=second_tool_call), + ToolMessage( + content="Normal result: Test normal", + name="tool_normal", + tool_call_id="2", + id=result["messages"][2].id, + ), + AIMessage(content="Test normal-Test normal-Normal result: Test normal", id="1"), + ] + + both_tool_calls = [ + ToolCall( + name="tool_return_direct", + args={"input": "Test both direct"}, + id="3", + ), + ToolCall( + name="tool_normal", + args={"input": "Test both normal"}, + id="4", + ), + ] + model = FakeToolCallingModel(tool_calls=[both_tool_calls, []]) + agent = create_react_agent(model, [tool_return_direct, tool_normal]) + result = agent.invoke({"messages": [HumanMessage(content="Test both", id="hum2")]}) + assert result["messages"] == [ + HumanMessage(content="Test both", id="hum2"), + AIMessage(content="Test both", id="0", tool_calls=both_tool_calls), + ToolMessage( + content="Direct result: Test both direct", + name="tool_return_direct", + tool_call_id="3", + id=result["messages"][2].id, + ), + ToolMessage( + content="Normal result: Test both normal", + name="tool_normal", + tool_call_id="4", + id=result["messages"][3].id, + ), + ] + + +def test__get_state_args() -> None: + class Schema1(BaseModel): + a: Annotated[str, InjectedState] + + class Schema2(Schema1): + b: Annotated[int, InjectedState("bar")] + + @dec_tool(args_schema=Schema2) + def foo(a: str, b: int) -> float: + """return""" + return 0.0 + + assert _get_state_args(foo) == {"a": None, "b": "bar"} diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_pregel.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_pregel.py new file mode 100644 index 0000000..b988154 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_pregel.py @@ -0,0 +1,14381 @@ +import enum +import json +import operator +import re +import time +import uuid +import warnings +from collections import Counter +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager 
+from dataclasses import replace +from random import randrange +from typing import ( + Annotated, + Any, + Dict, + Generator, + Iterator, + List, + Literal, + Optional, + Sequence, + Tuple, + TypedDict, + Union, + cast, + get_type_hints, +) + +import httpx +import pytest +from langchain_core.runnables import ( + RunnableConfig, + RunnableLambda, + RunnableMap, + RunnablePassthrough, + RunnablePick, +) +from langsmith import traceable +from pydantic import BaseModel +from pytest_mock import MockerFixture +from syrupy import SnapshotAssertion + +from langgraph.channels.base import BaseChannel +from langgraph.channels.binop import BinaryOperatorAggregate +from langgraph.channels.context import Context +from langgraph.channels.ephemeral_value import EphemeralValue +from langgraph.channels.last_value import LastValue +from langgraph.channels.topic import Topic +from langgraph.channels.untracked_value import UntrackedValue +from langgraph.checkpoint.base import ( + BaseCheckpointSaver, + Checkpoint, + CheckpointMetadata, + CheckpointTuple, +) +from langgraph.checkpoint.memory import MemorySaver +from langgraph.constants import ( + CONFIG_KEY_NODE_FINISHED, + ERROR, + FF_SEND_V2, + PULL, + PUSH, + START, +) +from langgraph.errors import InvalidUpdateError, MultipleSubgraphsError, NodeInterrupt +from langgraph.graph import END, Graph, GraphCommand, StateGraph +from langgraph.graph.message import MessageGraph, MessagesState, add_messages +from langgraph.managed.shared_value import SharedValue +from langgraph.prebuilt.chat_agent_executor import ( + create_tool_calling_executor, +) +from langgraph.prebuilt.tool_node import ToolNode +from langgraph.pregel import ( + Channel, + GraphRecursionError, + Pregel, + StateSnapshot, +) +from langgraph.pregel.retry import RetryPolicy +from langgraph.store.base import BaseStore +from langgraph.store.memory import InMemoryStore +from langgraph.types import ( + Command, + Interrupt, + PregelTask, + Send, + StreamWriter, + interrupt, +) + 
+from .any_str import AnyDict, AnyStr, AnyVersion, FloatBetween, UnsortedSequence +from .conftest import ( + ALL_CHECKPOINTERS_SYNC, + ALL_STORES_SYNC, + SHOULD_CHECK_SNAPSHOTS, +) +from .fake_chat import FakeChatModel +from .fake_tracer import FakeTracer +from .memory_assert import MemorySaverAssertCheckpointMetadata +from .messages import ( + _AnyIdAIMessage, + _AnyIdAIMessageChunk, + _AnyIdHumanMessage, + _AnyIdToolMessage, +) + + +# define these objects to avoid importing langchain_core.agents +# and therefore avoid relying on core Pydantic version +class AgentAction(BaseModel): + tool: str + tool_input: Union[str, dict] + log: str + type: Literal["AgentAction"] = "AgentAction" + + model_config = { + "json_schema_extra": { + "description": ( + """Represents a request to execute an action by an agent. + +The action consists of the name of the tool to execute and the input to pass +to the tool. The log is used to pass along extra information about the action.""" + ) + } + } + + +class AgentFinish(BaseModel): + """Final return value of an ActionAgent. + + Agents return an AgentFinish when they have reached a stopping condition. + """ + + return_values: dict + log: str + type: Literal["AgentFinish"] = "AgentFinish" + model_config = { + "json_schema_extra": { + "description": ( + """Final return value of an ActionAgent. 
+ +Agents return an AgentFinish when they have reached a stopping condition.""" + ) + } + } + + +def test_graph_validation() -> None: + def logic(inp: str) -> str: + return "" + + workflow = Graph() + workflow.add_node("agent", logic) + workflow.set_entry_point("agent") + workflow.set_finish_point("agent") + assert workflow.compile(), "valid graph" + + # Accept a dead-end + workflow = Graph() + workflow.add_node("agent", logic) + workflow.set_entry_point("agent") + workflow.compile() + + workflow = Graph() + workflow.add_node("agent", logic) + workflow.set_finish_point("agent") + with pytest.raises(ValueError, match="not reachable"): + workflow.compile() + + workflow = Graph() + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + workflow.set_entry_point("agent") + workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END}) + workflow.add_edge("tools", "agent") + assert workflow.compile(), "valid graph" + + workflow = Graph() + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + workflow.set_entry_point("tools") + workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END}) + workflow.add_edge("tools", "agent") + assert workflow.compile(), "valid graph" + + workflow = Graph() + workflow.set_entry_point("tools") + workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END}) + workflow.add_edge("tools", "agent") + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + assert workflow.compile(), "valid graph" + + workflow = Graph() + workflow.set_entry_point("tools") + workflow.add_conditional_edges( + "agent", logic, {"continue": "tools", "exit": END, "hmm": "extra"} + ) + workflow.add_edge("tools", "agent") + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + with pytest.raises(ValueError, match="unknown"): # extra is not defined + workflow.compile() + + workflow = Graph() + workflow.set_entry_point("agent") + 
workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END}) + workflow.add_edge("tools", "extra") + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + with pytest.raises(ValueError, match="unknown"): # extra is not defined + workflow.compile() + + workflow = Graph() + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + workflow.add_node("extra", logic) + workflow.set_entry_point("agent") + workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END}) + workflow.add_edge("tools", "agent") + with pytest.raises( + ValueError, match="Node `extra` is not reachable" + ): # extra is not reachable + workflow.compile() + + workflow = Graph() + workflow.add_node("agent", logic) + workflow.add_node("tools", logic) + workflow.add_node("extra", logic) + workflow.set_entry_point("agent") + workflow.add_conditional_edges("agent", logic) + workflow.add_edge("tools", "agent") + # Accept, even though extra is dead-end + workflow.compile() + + class State(TypedDict): + hello: str + + def node_a(state: State) -> State: + # typo + return {"hell": "world"} + + builder = StateGraph(State) + builder.add_node("a", node_a) + builder.set_entry_point("a") + builder.set_finish_point("a") + graph = builder.compile() + with pytest.raises(InvalidUpdateError): + graph.invoke({"hello": "there"}) + + graph = StateGraph(State) + graph.add_node("start", lambda x: x) + graph.add_edge("__start__", "start") + graph.add_edge("unknown", "start") + graph.add_edge("start", "__end__") + with pytest.raises(ValueError, match="Found edge starting at unknown node "): + graph.compile() + + def bad_reducer(a): ... 
+ + class BadReducerState(TypedDict): + hello: Annotated[str, bad_reducer] + + with pytest.raises(ValueError, match="Invalid reducer"): + StateGraph(BadReducerState) + + def node_b(state: State) -> State: + return {"hello": "world"} + + builder = StateGraph(State) + builder.add_node("a", node_b) + builder.add_node("b", node_b) + builder.add_node("c", node_b) + builder.set_entry_point("a") + builder.add_edge("a", "b") + builder.add_edge("a", "c") + graph = builder.compile() + + with pytest.raises(InvalidUpdateError, match="At key 'hello'"): + graph.invoke({"hello": "there"}) + + +def test_checkpoint_errors() -> None: + class FaultyGetCheckpointer(MemorySaver): + def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + raise ValueError("Faulty get_tuple") + + class FaultyPutCheckpointer(MemorySaver): + def put( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: Optional[dict[str, Union[str, int, float]]] = None, + ) -> RunnableConfig: + raise ValueError("Faulty put") + + class FaultyPutWritesCheckpointer(MemorySaver): + def put_writes( + self, config: RunnableConfig, writes: List[Tuple[str, Any]], task_id: str + ) -> RunnableConfig: + raise ValueError("Faulty put_writes") + + class FaultyVersionCheckpointer(MemorySaver): + def get_next_version(self, current: Optional[int], channel: BaseChannel) -> int: + raise ValueError("Faulty get_next_version") + + def logic(inp: str) -> str: + return "" + + builder = StateGraph(Annotated[str, operator.add]) + builder.add_node("agent", logic) + builder.add_edge(START, "agent") + + graph = builder.compile(checkpointer=FaultyGetCheckpointer()) + with pytest.raises(ValueError, match="Faulty get_tuple"): + graph.invoke("", {"configurable": {"thread_id": "thread-1"}}) + + graph = builder.compile(checkpointer=FaultyPutCheckpointer()) + with pytest.raises(ValueError, match="Faulty put"): + graph.invoke("", {"configurable": {"thread_id": "thread-1"}}) + + graph = 
builder.compile(checkpointer=FaultyVersionCheckpointer()) + with pytest.raises(ValueError, match="Faulty get_next_version"): + graph.invoke("", {"configurable": {"thread_id": "thread-1"}}) + + # add parallel node + builder.add_node("parallel", logic) + builder.add_edge(START, "parallel") + graph = builder.compile(checkpointer=FaultyPutWritesCheckpointer()) + with pytest.raises(ValueError, match="Faulty put_writes"): + graph.invoke("", {"configurable": {"thread_id": "thread-1"}}) + + +def test_node_schemas_custom_output() -> None: + class State(TypedDict): + hello: str + bye: str + messages: Annotated[list[str], add_messages] + + class Output(TypedDict): + messages: list[str] + + class StateForA(TypedDict): + hello: str + messages: Annotated[list[str], add_messages] + + def node_a(state: StateForA) -> State: + assert state == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + class StateForB(TypedDict): + bye: str + now: int + + def node_b(state: StateForB): + assert state == { + "bye": "world", + } + return { + "now": 123, + "hello": "again", + } + + class StateForC(TypedDict): + hello: str + now: int + + def node_c(state: StateForC) -> StateForC: + assert state == { + "hello": "again", + "now": 123, + } + + builder = StateGraph(State, output=Output) + builder.add_node("a", node_a) + builder.add_node("b", node_b) + builder.add_node("c", node_c) + builder.add_edge(START, "a") + builder.add_edge("a", "b") + builder.add_edge("b", "c") + graph = builder.compile() + + assert graph.invoke({"hello": "there", "bye": "world", "messages": "hello"}) == { + "messages": [_AnyIdHumanMessage(content="hello")], + } + + builder = StateGraph(State, output=Output) + builder.add_node("a", node_a) + builder.add_node("b", node_b) + builder.add_node("c", node_c) + builder.add_edge(START, "a") + builder.add_edge("a", "b") + builder.add_edge("b", "c") + graph = builder.compile() + + assert graph.invoke( + { + "hello": "there", + "bye": "world", + "messages": 
"hello", + "now": 345, # ignored because not in input schema + } + ) == { + "messages": [_AnyIdHumanMessage(content="hello")], + } + + assert [ + c + for c in graph.stream( + { + "hello": "there", + "bye": "world", + "messages": "hello", + "now": 345, # ignored because not in input schema + } + ) + ] == [ + {"a": None}, + {"b": {"hello": "again", "now": 123}}, + {"c": None}, + ] + + +def test_reducer_before_first_node() -> None: + class State(TypedDict): + hello: str + messages: Annotated[list[str], add_messages] + + def node_a(state: State) -> State: + assert state == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + builder = StateGraph(State) + builder.add_node("a", node_a) + builder.set_entry_point("a") + builder.set_finish_point("a") + graph = builder.compile() + assert graph.invoke({"hello": "there", "messages": "hello"}) == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + class State(TypedDict): + hello: str + messages: Annotated[List[str], add_messages] + + def node_a(state: State) -> State: + assert state == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + builder = StateGraph(State) + builder.add_node("a", node_a) + builder.set_entry_point("a") + builder.set_finish_point("a") + graph = builder.compile() + assert graph.invoke({"hello": "there", "messages": "hello"}) == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + class State(TypedDict): + hello: str + messages: Annotated[Sequence[str], add_messages] + + def node_a(state: State) -> State: + assert state == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + builder = StateGraph(State) + builder.add_node("a", node_a) + builder.set_entry_point("a") + builder.set_finish_point("a") + graph = builder.compile() + assert graph.invoke({"hello": "there", "messages": "hello"}) == { + "hello": "there", + "messages": 
[_AnyIdHumanMessage(content="hello")], + } + + +def test_invoke_single_process_in_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={ + "one": chain, + }, + channels={ + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + graph = Graph() + graph.add_node("add_one", add_one) + graph.set_entry_point("add_one") + graph.set_finish_point("add_one") + gapp = graph.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "integer", + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "integer", + } + with warnings.catch_warnings(): + warnings.simplefilter("error") # raise warnings as errors + assert app.config_schema().model_json_schema() == { + "properties": {}, + "title": "LangGraphConfig", + "type": "object", + } + + assert app.invoke(2) == 3 + assert app.invoke(2, output_keys=["output"]) == {"output": 3} + assert repr(app), "does not raise recursion error" + + assert gapp.invoke(2, debug=True) == 3 + + +@pytest.mark.parametrize( + "falsy_value", + [None, False, 0, "", [], {}, set(), frozenset(), 0.0, 0j], +) +def test_invoke_single_process_in_out_falsy_values(falsy_value: Any) -> None: + graph = Graph() + graph.add_node("return_falsy_const", lambda *args, **kwargs: falsy_value) + graph.set_entry_point("return_falsy_const") + graph.set_finish_point("return_falsy_const") + gapp = graph.compile() + assert gapp.invoke(1) == falsy_value + + +def test_invoke_single_process_in_write_kwargs(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = ( + Channel.subscribe_to("input") + | add_one + | Channel.write_to("output", fixed=5, output_plus_one=lambda x: x + 1) + ) + + app = Pregel( + nodes={"one": chain}, + 
channels={ + "input": LastValue(int), + "output": LastValue(int), + "fixed": LastValue(int), + "output_plus_one": LastValue(int), + }, + output_channels=["output", "fixed", "output_plus_one"], + input_channels="input", + ) + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "integer", + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "object", + "properties": { + "output": {"title": "Output", "type": "integer", "default": None}, + "fixed": {"title": "Fixed", "type": "integer", "default": None}, + "output_plus_one": { + "title": "Output Plus One", + "type": "integer", + "default": None, + }, + }, + } + assert app.invoke(2) == {"output": 3, "fixed": 5, "output_plus_one": 4} + + +def test_invoke_single_process_in_out_dict(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": chain}, + channels={"input": LastValue(int), "output": LastValue(int)}, + input_channels="input", + output_channels=["output"], + ) + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "integer", + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "object", + "properties": { + "output": {"title": "Output", "type": "integer", "default": None} + }, + } + assert app.invoke(2) == {"output": 3} + + +def test_invoke_single_process_in_dict_out_dict(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": chain}, + channels={"input": LastValue(int), "output": LastValue(int)}, + input_channels=["input"], + output_channels=["output"], + ) + if SHOULD_CHECK_SNAPSHOTS: + assert 
app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "object", + "properties": { + "input": {"title": "Input", "type": "integer", "default": None} + }, + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "object", + "properties": { + "output": {"title": "Output", "type": "integer", "default": None} + }, + } + assert app.invoke({"input": 2}) == {"output": 3} + + +def test_invoke_two_processes_in_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + assert app.invoke(2) == 4 + + with pytest.raises(GraphRecursionError): + app.invoke(2, {"recursion_limit": 1}, debug=1) + + graph = Graph() + graph.add_node("add_one", add_one) + graph.add_node("add_one_more", add_one) + graph.set_entry_point("add_one") + graph.set_finish_point("add_one_more") + graph.add_edge("add_one", "add_one_more") + gapp = graph.compile() + + assert gapp.invoke(2) == 4 + + for step, values in enumerate(gapp.stream(2, debug=1), start=1): + if step == 1: + assert values == { + "add_one": 3, + } + elif step == 2: + assert values == { + "add_one_more": 4, + } + else: + assert 0, f"{step}:{values}" + assert step == 2 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_invoke_two_processes_in_out_interrupt( + request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = 
Channel.subscribe_to("inbox") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + checkpointer=checkpointer, + interrupt_after_nodes=["one"], + ) + thread1 = {"configurable": {"thread_id": "1"}} + thread2 = {"configurable": {"thread_id": "2"}} + + # start execution, stop at inbox + assert app.invoke(2, thread1) is None + + # inbox == 3 + checkpoint = checkpointer.get(thread1) + assert checkpoint is not None + assert checkpoint["channel_values"]["inbox"] == 3 + + # resume execution, finish + assert app.invoke(None, thread1) == 4 + + # start execution again, stop at inbox + assert app.invoke(20, thread1) is None + + # inbox == 21 + checkpoint = checkpointer.get(thread1) + assert checkpoint is not None + assert checkpoint["channel_values"]["inbox"] == 21 + + # send a new value in, interrupting the previous execution + assert app.invoke(3, thread1) is None + assert app.invoke(None, thread1) == 5 + + # start execution again, stopping at inbox + assert app.invoke(20, thread2) is None + + # inbox == 21 + snapshot = app.get_state(thread2) + assert snapshot.values["inbox"] == 21 + assert snapshot.next == ("two",) + + # update the state, resume + app.update_state(thread2, 25, as_node="one") + assert app.invoke(None, thread2) == 26 + + # no pending tasks + snapshot = app.get_state(thread2) + assert snapshot.next == () + + # list history + history = [c for c in app.get_state_history(thread1)] + assert history == [ + StateSnapshot( + values={"inbox": 4, "output": 5, "input": 3}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 6, + "writes": {"two": 5}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[1].config, + ), + 
StateSnapshot( + values={"inbox": 4, "output": 4, "input": 3}, + tasks=(PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 5}),), + next=("two",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 5, + "writes": {"one": None}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[2].config, + ), + StateSnapshot( + values={"inbox": 21, "output": 4, "input": 3}, + tasks=(PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 4}),), + next=("one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": 4, + "writes": {"input": 3}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[3].config, + ), + StateSnapshot( + values={"inbox": 21, "output": 4, "input": 20}, + tasks=(PregelTask(AnyStr(), "two", (PULL, "two")),), + next=("two",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"one": None}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[4].config, + ), + StateSnapshot( + values={"inbox": 3, "output": 4, "input": 20}, + tasks=(PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 21}),), + next=("one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": 2, + "writes": {"input": 20}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[5].config, + ), + StateSnapshot( + values={"inbox": 3, "output": 4, "input": 2}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + 
"parents": {}, + "source": "loop", + "step": 1, + "writes": {"two": 4}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[6].config, + ), + StateSnapshot( + values={"inbox": 3, "input": 2}, + tasks=(PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 4}),), + next=("two",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": {"one": None}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[7].config, + ), + StateSnapshot( + values={"input": 2}, + tasks=(PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 3}),), + next=("one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": -1, + "writes": {"input": 2}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + + # re-running from any previous checkpoint should re-run nodes + assert [c for c in app.stream(None, history[0].config, stream_mode="updates")] == [] + assert [c for c in app.stream(None, history[1].config, stream_mode="updates")] == [ + {"two": {"output": 5}}, + ] + assert [c for c in app.stream(None, history[2].config, stream_mode="updates")] == [ + {"one": {"inbox": 4}}, + {"__interrupt__": ()}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_fork_always_re_runs_nodes( + request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + add_one = mocker.Mock(side_effect=lambda _: 1) + + builder = StateGraph(Annotated[int, operator.add]) + builder.add_node("add_one", add_one) + builder.add_edge(START, "add_one") + builder.add_conditional_edges("add_one", lambda cnt: "add_one" if cnt < 6 else END) + graph = 
builder.compile(checkpointer=checkpointer) + + thread1 = {"configurable": {"thread_id": "1"}} + + # start execution, stop at inbox + assert [*graph.stream(1, thread1, stream_mode=["values", "updates"])] == [ + ("values", 1), + ("updates", {"add_one": 1}), + ("values", 2), + ("updates", {"add_one": 1}), + ("values", 3), + ("updates", {"add_one": 1}), + ("values", 4), + ("updates", {"add_one": 1}), + ("values", 5), + ("updates", {"add_one": 1}), + ("values", 6), + ] + + # list history + history = [c for c in graph.get_state_history(thread1)] + assert history == [ + StateSnapshot( + values=6, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 5, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[1].config, + ), + StateSnapshot( + values=5, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[2].config, + ), + StateSnapshot( + values=4, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[3].config, + ), + StateSnapshot( + values=3, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": 
{}, + "source": "loop", + "step": 2, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[4].config, + ), + StateSnapshot( + values=2, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[5].config, + ), + StateSnapshot( + values=1, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[6].config, + ), + StateSnapshot( + values=0, + tasks=(PregelTask(AnyStr(), "__start__", (PULL, "__start__"), result=1),), + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + + # forking from any previous checkpoint should re-run nodes + assert [ + c for c in graph.stream(None, history[0].config, stream_mode="updates") + ] == [] + assert [ + c for c in graph.stream(None, history[1].config, stream_mode="updates") + ] == [ + {"add_one": 1}, + ] + assert [ + c for c in graph.stream(None, history[2].config, stream_mode="updates") + ] == [ + {"add_one": 1}, + {"add_one": 1}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_run_from_checkpoint_id_retains_previous_writes( + request: pytest.FixtureRequest, checkpointer_name: str, mocker: 
MockerFixture +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + class MyState(TypedDict): + myval: Annotated[int, operator.add] + otherval: bool + + class Anode: + def __init__(self): + self.switch = False + + def __call__(self, state: MyState): + self.switch = not self.switch + return {"myval": 2 if self.switch else 1, "otherval": self.switch} + + builder = StateGraph(MyState) + thenode = Anode() # Fun. + builder.add_node("node_one", thenode) + builder.add_node("node_two", thenode) + builder.add_edge(START, "node_one") + + def _getedge(src: str): + swap = "node_one" if src == "node_two" else "node_two" + + def _edge(st: MyState) -> Literal["__end__", "node_one", "node_two"]: + if st["myval"] > 3: + return END + if st["otherval"]: + return swap + return src + + return _edge + + builder.add_conditional_edges("node_one", _getedge("node_one")) + builder.add_conditional_edges("node_two", _getedge("node_two")) + graph = builder.compile(checkpointer=checkpointer) + + thread_id = uuid.uuid4() + thread1 = {"configurable": {"thread_id": str(thread_id)}} + + result = graph.invoke({"myval": 1}, thread1) + assert result["myval"] == 4 + history = [c for c in graph.get_state_history(thread1)] + + assert len(history) == 4 + assert history[-1].values == {"myval": 0} + assert history[0].values == {"myval": 4, "otherval": False} + + second_run_config = { + **thread1, + "configurable": { + **thread1["configurable"], + "checkpoint_id": history[1].config["configurable"]["checkpoint_id"], + }, + } + second_result = graph.invoke(None, second_run_config) + assert second_result == {"myval": 5, "otherval": True} + + new_history = [ + c + for c in graph.get_state_history( + {"configurable": {"thread_id": str(thread_id), "checkpoint_ns": ""}} + ) + ] + + assert len(new_history) == len(history) + 1 + for original, new in zip(history, new_history[1:]): + assert original.values == new.values + assert original.next == new.next + assert 
original.metadata["step"] == new.metadata["step"] + + def _get_tasks(hist: list, start: int): + return [h.tasks for h in hist[start:]] + + assert _get_tasks(new_history, 1) == _get_tasks(history, 0) + + +def test_invoke_two_processes_in_dict_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = ( + Channel.subscribe_to("inbox") + | RunnableLambda(add_one).batch + | RunnablePassthrough(lambda _: time.sleep(0.1)) + | Channel.write_to("output").batch + ) + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": Topic(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels=["input", "inbox"], + stream_channels=["output", "inbox"], + output_channels=["output"], + ) + + # [12 + 1, 2 + 1 + 1] + assert [ + *app.stream( + {"input": 2, "inbox": 12}, output_keys="output", stream_mode="updates" + ) + ] == [ + {"one": None}, + {"two": 13}, + {"two": 4}, + ] + assert [*app.stream({"input": 2, "inbox": 12}, output_keys="output")] == [ + 13, + 4, + ] + + assert [*app.stream({"input": 2, "inbox": 12}, stream_mode="updates")] == [ + {"one": {"inbox": 3}}, + {"two": {"output": 13}}, + {"two": {"output": 4}}, + ] + assert [*app.stream({"input": 2, "inbox": 12})] == [ + {"inbox": [3], "output": 13}, + {"output": 4}, + ] + assert [*app.stream({"input": 2, "inbox": 12}, stream_mode="debug")] == [ + { + "type": "task", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "one", + "input": 2, + "triggers": ["input"], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "two", + "input": [12], + "triggers": ["inbox"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "one", + "result": [("inbox", 3)], + "error": None, + "interrupts": [], + }, + }, + { + "type": 
"task_result", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "two", + "result": [("output", 13)], + "error": None, + "interrupts": [], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "two", + "input": [3], + "triggers": ["inbox"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "two", + "result": [("output", 4)], + "error": None, + "interrupts": [], + }, + }, + ] + + +def test_batch_two_processes_in_out() -> None: + def add_one_with_delay(inp: int) -> int: + time.sleep(inp / 10) + return inp + 1 + + one = Channel.subscribe_to("input") | add_one_with_delay | Channel.write_to("one") + two = Channel.subscribe_to("one") | add_one_with_delay | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "one": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + assert app.batch([3, 2, 1, 3, 5]) == [5, 4, 3, 5, 7] + assert app.batch([3, 2, 1, 3, 5], output_keys=["output"]) == [ + {"output": 5}, + {"output": 4}, + {"output": 3}, + {"output": 5}, + {"output": 7}, + ] + + graph = Graph() + graph.add_node("add_one", add_one_with_delay) + graph.add_node("add_one_more", add_one_with_delay) + graph.set_entry_point("add_one") + graph.set_finish_point("add_one_more") + graph.add_edge("add_one", "add_one_more") + gapp = graph.compile() + + assert gapp.batch([3, 2, 1, 3, 5]) == [5, 4, 3, 5, 7] + + +def test_invoke_many_processes_in_out(mocker: MockerFixture) -> None: + test_size = 100 + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")} + for i in range(test_size - 2): + nodes[str(i)] = ( + Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i)) + ) + nodes["last"] = Channel.subscribe_to(str(i)) | 
add_one | Channel.write_to("output") + + app = Pregel( + nodes=nodes, + channels={str(i): LastValue(int) for i in range(-1, test_size - 2)} + | {"input": LastValue(int), "output": LastValue(int)}, + input_channels="input", + output_channels="output", + ) + + for _ in range(10): + assert app.invoke(2, {"recursion_limit": test_size}) == 2 + test_size + + with ThreadPoolExecutor() as executor: + assert [ + *executor.map(app.invoke, [2] * 10, [{"recursion_limit": test_size}] * 10) + ] == [2 + test_size] * 10 + + +def test_batch_many_processes_in_out(mocker: MockerFixture) -> None: + test_size = 100 + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")} + for i in range(test_size - 2): + nodes[str(i)] = ( + Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i)) + ) + nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output") + + app = Pregel( + nodes=nodes, + channels={str(i): LastValue(int) for i in range(-1, test_size - 2)} + | {"input": LastValue(int), "output": LastValue(int)}, + input_channels="input", + output_channels="output", + ) + + for _ in range(3): + assert app.batch([2, 1, 3, 4, 5], {"recursion_limit": test_size}) == [ + 2 + test_size, + 1 + test_size, + 3 + test_size, + 4 + test_size, + 5 + test_size, + ] + + with ThreadPoolExecutor() as executor: + assert [ + *executor.map( + app.batch, [[2, 1, 3, 4, 5]] * 3, [{"recursion_limit": test_size}] * 3 + ) + ] == [ + [2 + test_size, 1 + test_size, 3 + test_size, 4 + test_size, 5 + test_size] + ] * 3 + + +def test_invoke_two_processes_two_in_two_out_invalid(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + two = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={"output": LastValue(int), "input": 
LastValue(int)}, + input_channels="input", + output_channels="output", + ) + + with pytest.raises(InvalidUpdateError): + # LastValue channels can only be updated once per iteration + app.invoke(2) + + class State(TypedDict): + hello: str + + def my_node(input: State) -> State: + return {"hello": "world"} + + builder = StateGraph(State) + builder.add_node("one", my_node) + builder.add_node("two", my_node) + builder.set_conditional_entry_point(lambda _: ["one", "two"]) + + graph = builder.compile() + with pytest.raises(InvalidUpdateError, match="At key 'hello'"): + graph.invoke({"hello": "there"}, debug=True) + + +def test_invoke_two_processes_two_in_two_out_valid(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + two = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "output": Topic(int), + }, + input_channels="input", + output_channels="output", + ) + + # An Inbox channel accumulates updates into a sequence + assert app.invoke(2) == [3, 3] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_invoke_checkpoint_two( + mocker: MockerFixture, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + add_one = mocker.Mock(side_effect=lambda x: x["total"] + x["input"]) + errored_once = False + + def raise_if_above_10(input: int) -> int: + nonlocal errored_once + if input > 4: + if errored_once: + pass + else: + errored_once = True + raise ConnectionError("I will be retried") + if input > 10: + raise ValueError("Input is too large") + return input + + one = ( + Channel.subscribe_to(["input"]).join(["total"]) + | add_one + | Channel.write_to("output", "total") + | raise_if_above_10 + ) + + app = Pregel( 
+ nodes={"one": one}, + channels={ + "total": BinaryOperatorAggregate(int, operator.add), + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + checkpointer=checkpointer, + retry_policy=RetryPolicy(), + ) + + # total starts out as 0, so output is 0+2=2 + assert app.invoke(2, {"configurable": {"thread_id": "1"}}) == 2 + checkpoint = checkpointer.get({"configurable": {"thread_id": "1"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 2 + # total is now 2, so output is 2+3=5 + assert app.invoke(3, {"configurable": {"thread_id": "1"}}) == 5 + assert errored_once, "errored and retried" + checkpoint_tup = checkpointer.get_tuple({"configurable": {"thread_id": "1"}}) + assert checkpoint_tup is not None + assert checkpoint_tup.checkpoint["channel_values"].get("total") == 7 + # total is now 2+5=7, so output would be 7+4=11, but raises ValueError + with pytest.raises(ValueError): + app.invoke(4, {"configurable": {"thread_id": "1"}}) + # checkpoint is not updated, error is recorded + checkpoint_tup = checkpointer.get_tuple({"configurable": {"thread_id": "1"}}) + assert checkpoint_tup is not None + assert checkpoint_tup.checkpoint["channel_values"].get("total") == 7 + assert checkpoint_tup.pending_writes == [ + (AnyStr(), ERROR, "ValueError('Input is too large')") + ] + # on a new thread, total starts out as 0, so output is 0+5=5 + assert app.invoke(5, {"configurable": {"thread_id": "2"}}) == 5 + checkpoint = checkpointer.get({"configurable": {"thread_id": "1"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 7 + checkpoint = checkpointer.get({"configurable": {"thread_id": "2"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 5 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_pending_writes_resume( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: 
+ checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + class State(TypedDict): + value: Annotated[int, operator.add] + + class AwhileMaker: + def __init__(self, sleep: float, rtn: Union[Dict, Exception]) -> None: + self.sleep = sleep + self.rtn = rtn + self.reset() + + def __call__(self, input: State) -> Any: + self.calls += 1 + time.sleep(self.sleep) + if isinstance(self.rtn, Exception): + raise self.rtn + else: + return self.rtn + + def reset(self): + self.calls = 0 + + one = AwhileMaker(0.1, {"value": 2}) + two = AwhileMaker(0.3, ConnectionError("I'm not good")) + builder = StateGraph(State) + builder.add_node("one", one) + builder.add_node("two", two, retry=RetryPolicy(max_attempts=2)) + builder.add_edge(START, "one") + builder.add_edge(START, "two") + graph = builder.compile(checkpointer=checkpointer) + + thread1: RunnableConfig = {"configurable": {"thread_id": "1"}} + with pytest.raises(ConnectionError, match="I'm not good"): + graph.invoke({"value": 1}, thread1) + + # both nodes should have been called once + assert one.calls == 1 + assert two.calls == 2 # two attempts + + # latest checkpoint should be before nodes "one", "two" + # but we should have applied the write from "one" + state = graph.get_state(thread1) + assert state is not None + assert state.values == {"value": 3} + assert state.next == ("two",) + assert state.tasks == ( + PregelTask(AnyStr(), "one", (PULL, "one"), result={"value": 2}), + PregelTask(AnyStr(), "two", (PULL, "two"), 'ConnectionError("I\'m not good")'), + ) + assert state.metadata == { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + } + # get_state with checkpoint_id should not apply any pending writes + state = graph.get_state(state.config) + assert state is not None + assert state.values == {"value": 1} + assert state.next == ("one", "two") + # should contain pending write of "one" + checkpoint = checkpointer.get_tuple(thread1) + 
assert checkpoint is not None + # should contain error from "two" + expected_writes = [ + (AnyStr(), "one", "one"), + (AnyStr(), "value", 2), + (AnyStr(), ERROR, 'ConnectionError("I\'m not good")'), + ] + assert len(checkpoint.pending_writes) == 3 + assert all(w in expected_writes for w in checkpoint.pending_writes) + # both non-error pending writes come from same task + non_error_writes = [w for w in checkpoint.pending_writes if w[1] != ERROR] + assert non_error_writes[0][0] == non_error_writes[1][0] + # error write is from the other task + error_write = next(w for w in checkpoint.pending_writes if w[1] == ERROR) + assert error_write[0] != non_error_writes[0][0] + + # resume execution + with pytest.raises(ConnectionError, match="I'm not good"): + graph.invoke(None, thread1) + + # node "one" succeeded previously, so shouldn't be called again + assert one.calls == 1 + # node "two" should have been called once again + assert two.calls == 4 # two attempts before + two attempts now + + # confirm no new checkpoints saved + state_two = graph.get_state(thread1) + assert state_two.metadata == state.metadata + + # resume execution, without exception + two.rtn = {"value": 3} + # both the pending write and the new write were applied, 1 + 2 + 3 = 6 + assert graph.invoke(None, thread1) == {"value": 6} + + # check all final checkpoints + checkpoints = [c for c in checkpointer.list(thread1)] + # we should have 3 + assert len(checkpoints) == 3 + # the last one not too interesting for this test + assert checkpoints[0] == CheckpointTuple( + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + checkpoint={ + "v": 1, + "id": AnyStr(), + "ts": AnyStr(), + "pending_sends": [], + "versions_seen": { + "one": { + "start:one": AnyVersion(), + }, + "two": { + "start:two": AnyVersion(), + }, + "__input__": {}, + "__start__": { + "__start__": AnyVersion(), + }, + "__interrupt__": { + "value": AnyVersion(), + "__start__": AnyVersion(), 
+ "start:one": AnyVersion(), + "start:two": AnyVersion(), + }, + }, + "channel_versions": { + "one": AnyVersion(), + "two": AnyVersion(), + "value": AnyVersion(), + "__start__": AnyVersion(), + "start:one": AnyVersion(), + "start:two": AnyVersion(), + }, + "channel_values": {"one": "one", "two": "two", "value": 6}, + }, + metadata={ + "parents": {}, + "step": 1, + "source": "loop", + "writes": {"one": {"value": 2}, "two": {"value": 3}}, + "thread_id": "1", + }, + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": checkpoints[1].config["configurable"]["checkpoint_id"], + } + }, + pending_writes=[], + ) + # the previous one we assert that pending writes contains both + # - original error + # - successful writes from resuming after preventing error + assert checkpoints[1] == CheckpointTuple( + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + checkpoint={ + "v": 1, + "id": AnyStr(), + "ts": AnyStr(), + "pending_sends": [], + "versions_seen": { + "__input__": {}, + "__start__": { + "__start__": AnyVersion(), + }, + }, + "channel_versions": { + "value": AnyVersion(), + "__start__": AnyVersion(), + "start:one": AnyVersion(), + "start:two": AnyVersion(), + }, + "channel_values": { + "value": 1, + "start:one": "__start__", + "start:two": "__start__", + }, + }, + metadata={ + "parents": {}, + "step": 0, + "source": "loop", + "writes": None, + "thread_id": "1", + }, + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": checkpoints[2].config["configurable"]["checkpoint_id"], + } + }, + pending_writes=UnsortedSequence( + (AnyStr(), "one", "one"), + (AnyStr(), "value", 2), + (AnyStr(), "__error__", 'ConnectionError("I\'m not good")'), + (AnyStr(), "two", "two"), + (AnyStr(), "value", 3), + ), + ) + assert checkpoints[2] == CheckpointTuple( + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + 
"checkpoint_id": AnyStr(), + } + }, + checkpoint={ + "v": 1, + "id": AnyStr(), + "ts": AnyStr(), + "pending_sends": [], + "versions_seen": {"__input__": {}}, + "channel_versions": { + "__start__": AnyVersion(), + }, + "channel_values": {"__start__": {"value": 1}}, + }, + metadata={ + "parents": {}, + "step": -1, + "source": "input", + "writes": {"__start__": {"value": 1}}, + "thread_id": "1", + }, + parent_config=None, + pending_writes=UnsortedSequence( + (AnyStr(), "value", 1), + (AnyStr(), "start:one", "__start__"), + (AnyStr(), "start:two", "__start__"), + ), + ) + + +def test_cond_edge_after_send() -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + + def __call__(self, state): + return [self.name] + + def send_for_fun(state): + return [Send("2", state), Send("2", state)] + + def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + assert graph.invoke(["0"]) == ["0", "1", "2", "2", "3"] + + +def test_concurrent_emit_sends() -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + + def __call__(self, state): + return ( + [self.name] + if isinstance(state, list) + else ["|".join((self.name, str(state)))] + ) + + def send_for_fun(state): + return [Send("2", 1), Send("2", 2), "3.1"] + + def send_for_profit(state): + return [Send("2", 3), Send("2", 4)] + + def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("1.1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_node(Node("3.1")) + builder.add_edge(START, "1") + 
builder.add_edge(START, "1.1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("1.1", send_for_profit) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + assert graph.invoke(["0"]) == ( + [ + "0", + "1", + "1.1", + "2|1", + "2|2", + "2|3", + "2|4", + "3", + "3.1", + ] + if FF_SEND_V2 + else [ + "0", + "1", + "1.1", + "3.1", + "2|1", + "2|2", + "2|3", + "2|4", + "3", + ] + ) + + +def test_send_sequences() -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + + def __call__(self, state): + update = ( + [self.name] + if isinstance(state, list) + else ["|".join((self.name, str(state)))] + ) + if isinstance(state, Command): + return replace(state, update=update) + else: + return update + + def send_for_fun(state): + return [ + Send("2", GraphCommand(send=Send("2", 3))), + Send("2", GraphCommand(send=Send("2", 4))), + "3.1", + ] + + def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_node(Node("3.1")) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + assert ( + graph.invoke(["0"]) + == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='2', arg=4))", + "2|3", + "2|4", + "3", + "3.1", + ] + if FF_SEND_V2 + else [ + "0", + "1", + "3.1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='2', arg=4))", + "3", + "2|3", + "2|4", + "3", + ] + ) + + +@pytest.mark.repeat(20) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_send_dedupe_on_resume( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + if not FF_SEND_V2: + pytest.skip("Send deduplication is only available in Send V2") + 
checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + class InterruptOnce: + ticks: int = 0 + + def __call__(self, state): + self.ticks += 1 + if self.ticks == 1: + raise NodeInterrupt("Bahh") + return ["|".join(("flaky", str(state)))] + + class Node: + def __init__(self, name: str): + self.name = name + self.ticks = 0 + self.__name__ = name + + def __call__(self, state): + self.ticks += 1 + update = ( + [self.name] + if isinstance(state, list) + else ["|".join((self.name, str(state)))] + ) + if isinstance(state, GraphCommand): + return replace(state, update=update) + else: + return update + + def send_for_fun(state): + return [ + Send("2", GraphCommand(send=Send("2", 3))), + Send("2", GraphCommand(send=Send("flaky", 4))), + "3.1", + ] + + def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_node(Node("3.1")) + builder.add_node("flaky", InterruptOnce()) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("2", route_to_three) + + graph = builder.compile(checkpointer=checkpointer) + thread1 = {"configurable": {"thread_id": "1"}} + assert graph.invoke(["0"], thread1, debug=1) == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + ] + assert builder.nodes["2"].runnable.func.ticks == 3 + assert builder.nodes["flaky"].runnable.func.ticks == 1 + # check state + state = graph.get_state(thread1) + assert state.next == ("flaky",) + # check history + history = [c for c in graph.get_state_history(thread1)] + assert len(history) == 2 + # resume execution + assert graph.invoke(None, thread1, debug=1) == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + "flaky|4", + "3", + "3.1", + ] + # node "2" doesn't 
get called again, as we recover writes saved before + assert builder.nodes["2"].runnable.func.ticks == 3 + # node "flaky" gets called again, as it was interrupted + assert builder.nodes["flaky"].runnable.func.ticks == 2 + # check state + state = graph.get_state(thread1) + assert state.next == () + # check history + history = [c for c in graph.get_state_history(thread1)] + assert ( + history[1] + == [ + StateSnapshot( + values=[ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + "flaky|4", + "3", + "3.1", + ], + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"3": ["3"], "3.1": ["3.1"]}, + "thread_id": "1", + "step": 2, + "parents": {}, + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=(), + ), + StateSnapshot( + values=[ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + "flaky|4", + ], + next=("3", "3.1"), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": { + "1": ["1"], + "2": [ + ["2|Command(send=Send(node='2', arg=3))"], + ["2|Command(send=Send(node='flaky', arg=4))"], + ["2|3"], + ], + "flaky": ["flaky|4"], + }, + "thread_id": "1", + "step": 1, + "parents": {}, + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="3", + path=("__pregel_pull", "3"), + error=None, + interrupts=(), + state=None, + result=["3"], + ), + PregelTask( + id=AnyStr(), + name="3.1", + path=("__pregel_pull", "3.1"), + error=None, + interrupts=(), + state=None, + result=["3.1"], + ), + ), + ), + 
StateSnapshot( + values=["0"], + next=("1", "2", "2", "2", "flaky"), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": None, + "thread_id": "1", + "step": 0, + "parents": {}, + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="1", + path=("__pregel_pull", "1"), + error=None, + interrupts=(), + state=None, + result=["1"], + ), + PregelTask( + id=AnyStr(), + name="2", + path=( + "__pregel_push", + ("__pregel_pull", "1"), + 2, + AnyStr(), + ), + error=None, + interrupts=(), + state=None, + result=["2|Command(send=Send(node='2', arg=3))"], + ), + PregelTask( + id=AnyStr(), + name="2", + path=( + "__pregel_push", + ("__pregel_pull", "1"), + 3, + AnyStr(), + ), + error=None, + interrupts=(), + state=None, + result=["2|Command(send=Send(node='flaky', arg=4))"], + ), + PregelTask( + id=AnyStr(), + name="2", + path=( + "__pregel_push", + ( + "__pregel_push", + ("__pregel_pull", "1"), + 2, + AnyStr(), + ), + 2, + AnyStr(), + ), + error=None, + interrupts=(), + state=None, + result=["2|3"], + ), + PregelTask( + id=AnyStr(), + name="flaky", + path=( + "__pregel_push", + ( + "__pregel_push", + ("__pregel_pull", "1"), + 3, + AnyStr(), + ), + 2, + AnyStr(), + ), + error=None, + interrupts=(Interrupt(value="Bahh", when="during"),), + state=None, + result=["flaky|4"], + ), + ), + ), + StateSnapshot( + values=[], + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": ["0"]}, + "thread_id": "1", + "step": -1, + "parents": {}, + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=("__pregel_pull", "__start__"), + error=None, + interrupts=(), 
+ state=None, + result=["0"], + ), + ), + ), + ][1] + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_send_react_interrupt( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage + + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + ai_message = AIMessage( + "", + id="ai1", + tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())], + ) + + def agent(state): + return {"messages": ai_message} + + def route(state): + if isinstance(state["messages"][-1], AIMessage): + return [ + Send(call["name"], call) for call in state["messages"][-1].tool_calls + ] + + foo_called = 0 + + def foo(call: ToolCall): + nonlocal foo_called + foo_called += 1 + return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])} + + builder = StateGraph(MessagesState) + builder.add_node(agent) + builder.add_node(foo) + builder.add_edge(START, "agent") + builder.add_conditional_edges("agent", route) + graph = builder.compile() + + assert graph.invoke({"messages": [HumanMessage("hello")]}) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + # simple interrupt-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "1"}} + assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + assert 
graph.invoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + # interrupt-update-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "2"}} + assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + + if not FF_SEND_V2: + return + + # get state should show the pending task + state = graph.get_state(thread1) + assert state == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 0, + "source": "loop", + "writes": None, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + id=AnyStr(), + name="foo", + 
path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # remove the tool call, clearing the pending task + graph.update_state( + thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])} + ) + + # tool call no longer in pending tasks + assert graph.get_state(thread1) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ), + ] + }, + next=(), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 1, + "source": "update", + "writes": { + "agent": { + "messages": _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ) + } + }, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=(), + ) + + # tool call not executed + assert graph.invoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage(content="Bye now"), + ] + } + assert foo_called == 0 + + # interrupt-update-resume flow, creating new Send in update call + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "3"}} + assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + + # get state should show the pending task + state = graph.get_state(thread1) + assert state == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 
2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 0, + "source": "loop", + "writes": None, + "parents": {}, + "thread_id": "3", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # replace the tool call, should clear previous send, create new one + graph.update_state( + thread1, + { + "messages": AIMessage( + "", + id=ai_message.id, + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ) + }, + ) + + # prev tool call no longer in pending tasks, new tool call is + assert graph.get_state(thread1) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 1, + "source": "update", + "writes": { + "agent": { + "messages": _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ) + } + }, + "parents": {}, + "thread_id": "3", + }, + 
created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", (), 0, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # prev tool call not executed, new tool call is + assert graph.invoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + AIMessage( + "", + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage(content="{'hi': [4, 5, 6]}", tool_call_id="tool1"), + ] + } + assert foo_called == 1 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_send_react_interrupt_control( + request: pytest.FixtureRequest, checkpointer_name: str, snapshot: SnapshotAssertion +) -> None: + from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage + + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + ai_message = AIMessage( + "", + id="ai1", + tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())], + ) + + def agent(state) -> GraphCommand[Literal["foo"]]: + return GraphCommand( + update={"messages": ai_message}, + send=[Send(call["name"], call) for call in ai_message.tool_calls], + ) + + foo_called = 0 + + def foo(call: ToolCall): + nonlocal foo_called + foo_called += 1 + return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])} + + builder = StateGraph(MessagesState) + builder.add_node(agent) + builder.add_node(foo) + builder.add_edge(START, "agent") + graph = builder.compile() + assert graph.get_graph().draw_mermaid() == snapshot + + assert graph.invoke({"messages": [HumanMessage("hello")]}) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + 
"id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + # simple interrupt-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "1"}} + assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + assert graph.invoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + if not FF_SEND_V2: + return + + # interrupt-update-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "2"}} + assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + + # get state should show the pending task + state = graph.get_state(thread1) + assert state == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + 
metadata={ + "step": 0, + "source": "loop", + "writes": None, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # remove the tool call, clearing the pending task + graph.update_state( + thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])} + ) + + # tool call no longer in pending tasks + assert graph.get_state(thread1) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ), + ] + }, + next=(), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 1, + "source": "update", + "writes": { + "agent": { + "messages": _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ) + } + }, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=(), + ) + + # tool call not executed + assert graph.invoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage(content="Bye now"), + ] + } + assert foo_called == 0 + + # interrupt-update-resume flow, creating new Send in update call + + # TODO add here test with invoke(Command()) + + 
+@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_invoke_checkpoint_three( + mocker: MockerFixture, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + adder = mocker.Mock(side_effect=lambda x: x["total"] + x["input"]) + + def raise_if_above_10(input: int) -> int: + if input > 10: + raise ValueError("Input is too large") + return input + + one = ( + Channel.subscribe_to(["input"]).join(["total"]) + | adder + | Channel.write_to("output", "total") + | raise_if_above_10 + ) + + app = Pregel( + nodes={"one": one}, + channels={ + "total": BinaryOperatorAggregate(int, operator.add), + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + checkpointer=checkpointer, + ) + + thread_1 = {"configurable": {"thread_id": "1"}} + # total starts out as 0, so output is 0+2=2 + assert app.invoke(2, thread_1, debug=1) == 2 + state = app.get_state(thread_1) + assert state is not None + assert state.values.get("total") == 2 + assert state.next == () + assert ( + state.config["configurable"]["checkpoint_id"] + == checkpointer.get(thread_1)["id"] + ) + # total is now 2, so output is 2+3=5 + assert app.invoke(3, thread_1) == 5 + state = app.get_state(thread_1) + assert state is not None + assert state.values.get("total") == 7 + assert ( + state.config["configurable"]["checkpoint_id"] + == checkpointer.get(thread_1)["id"] + ) + # total is now 2+5=7, so output would be 7+4=11, but raises ValueError + with pytest.raises(ValueError): + app.invoke(4, thread_1) + # checkpoint is updated with new input + state = app.get_state(thread_1) + assert state is not None + assert state.values.get("total") == 7 + assert state.next == ("one",) + """we checkpoint inputs and it failed on "one", so the next node is one""" + # we can recover from error by sending new inputs + assert app.invoke(2, thread_1) == 9 + state = 
app.get_state(thread_1) + assert state is not None + assert state.values.get("total") == 16, "total is now 7+9=16" + assert state.next == () + + thread_2 = {"configurable": {"thread_id": "2"}} + # on a new thread, total starts out as 0, so output is 0+5=5 + assert app.invoke(5, thread_2, debug=True) == 5 + state = app.get_state({"configurable": {"thread_id": "1"}}) + assert state is not None + assert state.values.get("total") == 16 + assert state.next == (), "checkpoint of other thread not touched" + state = app.get_state(thread_2) + assert state is not None + assert state.values.get("total") == 5 + assert state.next == () + + assert len(list(app.get_state_history(thread_1, limit=1))) == 1 + # list all checkpoints for thread 1 + thread_1_history = [c for c in app.get_state_history(thread_1)] + # there are 7 checkpoints + assert len(thread_1_history) == 7 + assert Counter(c.metadata["source"] for c in thread_1_history) == { + "input": 4, + "loop": 3, + } + # sorted descending + assert ( + thread_1_history[0].config["configurable"]["checkpoint_id"] + > thread_1_history[1].config["configurable"]["checkpoint_id"] + ) + # cursor pagination + cursored = list( + app.get_state_history(thread_1, limit=1, before=thread_1_history[0].config) + ) + assert len(cursored) == 1 + assert cursored[0].config == thread_1_history[1].config + # the last checkpoint + assert thread_1_history[0].values["total"] == 16 + # the first "loop" checkpoint + assert thread_1_history[-2].values["total"] == 2 + # can get each checkpoint using aget with config + assert ( + checkpointer.get(thread_1_history[0].config)["id"] + == thread_1_history[0].config["configurable"]["checkpoint_id"] + ) + assert ( + checkpointer.get(thread_1_history[1].config)["id"] + == thread_1_history[1].config["configurable"]["checkpoint_id"] + ) + + thread_1_next_config = app.update_state(thread_1_history[1].config, 10) + # update creates a new checkpoint + assert ( + thread_1_next_config["configurable"]["checkpoint_id"] + > 
thread_1_history[0].config["configurable"]["checkpoint_id"] + ) + # update makes new checkpoint child of the previous one + assert ( + app.get_state(thread_1_next_config).parent_config == thread_1_history[1].config + ) + # 1 more checkpoint in history + assert len(list(app.get_state_history(thread_1))) == 8 + assert Counter(c.metadata["source"] for c in app.get_state_history(thread_1)) == { + "update": 1, + "input": 4, + "loop": 3, + } + # the latest checkpoint is the updated one + assert app.get_state(thread_1) == app.get_state(thread_1_next_config) + + +def test_invoke_two_processes_two_in_join_two_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + add_10_each = mocker.Mock(side_effect=lambda x: sorted(y + 10 for y in x)) + + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + chain_three = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + chain_four = ( + Channel.subscribe_to("inbox") | add_10_each | Channel.write_to("output") + ) + + app = Pregel( + nodes={ + "one": one, + "chain_three": chain_three, + "chain_four": chain_four, + }, + channels={ + "inbox": Topic(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + # Then invoke app + # We get a single array result as chain_four waits for all publishers to finish + # before operating on all elements published to topic_two as an array + for _ in range(100): + assert app.invoke(2) == [13, 13] + + with ThreadPoolExecutor() as executor: + assert [*executor.map(app.invoke, [2] * 100)] == [[13, 13]] * 100 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_invoke_join_then_call_other_pregel( + mocker: MockerFixture, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + add_one = mocker.Mock(side_effect=lambda x: x + 1) + add_10_each = 
mocker.Mock(side_effect=lambda x: [y + 10 for y in x]) + + inner_app = Pregel( + nodes={ + "one": Channel.subscribe_to("input") | add_one | Channel.write_to("output") + }, + channels={ + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + one = ( + Channel.subscribe_to("input") + | add_10_each + | Channel.write_to("inbox_one").map() + ) + two = ( + Channel.subscribe_to("inbox_one") + | inner_app.map() + | sorted + | Channel.write_to("outbox_one") + ) + chain_three = Channel.subscribe_to("outbox_one") | sum | Channel.write_to("output") + + app = Pregel( + nodes={ + "one": one, + "two": two, + "chain_three": chain_three, + }, + channels={ + "inbox_one": Topic(int), + "outbox_one": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + for _ in range(10): + assert app.invoke([2, 3]) == 27 + + with ThreadPoolExecutor() as executor: + assert [*executor.map(app.invoke, [[2, 3]] * 10)] == [27] * 10 + + # add checkpointer + app.checkpointer = checkpointer + # subgraph is called twice in the same node, through .map(), so raises + with pytest.raises(MultipleSubgraphsError): + app.invoke([2, 3], {"configurable": {"thread_id": "1"}}) + + # set inner graph checkpointer NeverCheckpoint + inner_app.checkpointer = False + # subgraph still called twice, but checkpointing for inner graph is disabled + assert app.invoke([2, 3], {"configurable": {"thread_id": "1"}}) == 27 + + +def test_invoke_two_processes_one_in_two_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = ( + Channel.subscribe_to("input") | add_one | Channel.write_to("output", "between") + ) + two = Channel.subscribe_to("between") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "between": LastValue(int), + "output": LastValue(int), + }, + 
stream_channels=["output", "between"], + input_channels="input", + output_channels="output", + ) + + assert [c for c in app.stream(2, stream_mode="updates")] == [ + {"one": {"between": 3, "output": 3}}, + {"two": {"output": 4}}, + ] + assert [c for c in app.stream(2)] == [ + {"between": 3, "output": 3}, + {"between": 3, "output": 4}, + ] + + +def test_invoke_two_processes_no_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("between") + two = Channel.subscribe_to("between") | add_one + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "between": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + # It finishes executing (once no more messages being published) + # but returns nothing, as nothing was published to OUT topic + assert app.invoke(2) is None + + +def test_invoke_two_processes_no_in(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = Channel.subscribe_to("between") | add_one | Channel.write_to("output") + two = Channel.subscribe_to("between") | add_one + + with pytest.raises(TypeError): + Pregel(nodes={"one": one, "two": two}) + + +def test_channel_enter_exit_timing(mocker: MockerFixture) -> None: + setup = mocker.Mock() + cleanup = mocker.Mock() + + @contextmanager + def an_int() -> Generator[int, None, None]: + setup() + try: + yield 5 + finally: + cleanup() + + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = ( + Channel.subscribe_to("inbox") + | RunnableLambda(add_one).batch + | Channel.write_to("output").batch + ) + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": Topic(int), + "ctx": Context(an_int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + 
output_channels=["inbox", "output"], + stream_channels=["inbox", "output"], + ) + + assert setup.call_count == 0 + assert cleanup.call_count == 0 + for i, chunk in enumerate(app.stream(2)): + assert setup.call_count == 1, "Expected setup to be called once" + if i == 0: + assert chunk == {"inbox": [3]} + elif i == 1: + assert chunk == {"output": 4} + else: + pytest.fail("Expected only two chunks") + assert cleanup.call_count == 1, "Expected cleanup to be called once" + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_conditional_graph( + snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + from langchain_core.language_models.fake import FakeStreamingListLLM + from langchain_core.prompts import PromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_core.tools import tool + + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + # Assemble the tools + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + # Construct the agent + prompt = PromptTemplate.from_template("Hello!") + + llm = FakeStreamingListLLM( + responses=[ + "tool:search_api:query", + "tool:search_api:another", + "finish:answer", + ] + ) + + def agent_parser(input: str) -> Union[AgentAction, AgentFinish]: + if input.startswith("finish"): + _, answer = input.split(":") + return AgentFinish(return_values={"answer": answer}, log=input) + else: + _, tool_name, tool_input = input.split(":") + return AgentAction(tool=tool_name, tool_input=tool_input, log=input) + + agent = RunnablePassthrough.assign(agent_outcome=prompt | llm | agent_parser) + + # Define tool execution logic + def execute_tools(data: dict) -> dict: + data = data.copy() + agent_action: AgentAction = data.pop("agent_outcome") + observation = {t.name: t for t in 
tools}[agent_action.tool].invoke( + agent_action.tool_input + ) + if data.get("intermediate_steps") is None: + data["intermediate_steps"] = [] + else: + data["intermediate_steps"] = data["intermediate_steps"].copy() + data["intermediate_steps"].append([agent_action, observation]) + return data + + # Define decision-making logic + def should_continue(data: dict) -> str: + # Logic to decide whether to continue in the loop or exit + if isinstance(data["agent_outcome"], AgentFinish): + return "exit" + else: + return "continue" + + # Define a new graph + workflow = Graph() + + workflow.add_node("agent", agent) + workflow.add_node( + "tools", + execute_tools, + metadata={"parents": {}, "version": 2, "variant": "b"}, + ) + + workflow.set_entry_point("agent") + + workflow.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + + workflow.add_edge("tools", "agent") + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + assert app.get_graph().draw_mermaid() == snapshot + assert json.dumps(app.get_graph(xray=True).to_json(), indent=2) == snapshot + assert app.get_graph(xray=True).draw_mermaid(with_styles=False) == snapshot + + assert app.invoke({"input": "what is weather in sf"}) == { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + + assert [c for c in app.stream({"input": "what is weather in sf"})] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", 
log="tool:search_api:query" + ), + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + }, + ] + + # test state get/update methods with interrupt_after + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + if SHOULD_CHECK_SNAPSHOTS: + assert app_w_interrupt.get_graph().to_json() == snapshot + assert app_w_interrupt.get_graph().draw_mermaid() == snapshot + + assert [ + c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", 
log="tool:search_api:query" + ), + } + } + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + config=app_w_interrupt.checkpointer.get_tuple(config).config, + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": { + "agent": { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + assert ( + app_w_interrupt.checkpointer.get_tuple(config).config["configurable"][ + "checkpoint_id" + ] + is not None + ) + + app_w_interrupt.update_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + "thread_id": "1", + }, + 
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + ] + + app_w_interrupt.update_state( + config, + { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + }, + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 4, + "writes": { + "agent": { + "input": "what 
is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + } + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # test state get/update methods with interrupt_before + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + llm.i = 0 # reset the llm + + assert [ + c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + } + } + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": { + "agent": { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + ) + + 
assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + ] + + app_w_interrupt.update_state( + config, + { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice 
answer"}, + log="finish:a really nice answer", + ), + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + }, + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 4, + "writes": { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # test re-invoke to continue with interrupt_before + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "3"}} + llm.i = 0 # reset the llm + + assert [ + c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + } + } + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + 
next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": { + "agent": { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + }, + "thread_id": "3", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + }, + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + ] + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + 
log="tool:search_api:another", + ), + "result for another", + ], + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + }, + ] + + +def test_conditional_entrypoint_graph(snapshot: SnapshotAssertion) -> None: + def left(data: str) -> str: + return data + "->left" + + def right(data: str) -> str: + return data + "->right" + + def should_start(data: str) -> str: + # Logic to decide where to start + if len(data) > 10: + return "go-right" + else: + return "go-left" + + # Define a new graph + workflow = Graph() + + workflow.add_node("left", left) + workflow.add_node("right", right) + + workflow.set_conditional_entry_point( + should_start, {"go-left": "left", "go-right": "right"} + ) + + workflow.add_conditional_edges("left", lambda data: END, {END: END}) + workflow.add_edge("right", END) + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert ( + app.invoke("what is weather in sf", debug=True) + == "what is weather in sf->right" + ) + + assert [*app.stream("what is weather in sf")] == [ + {"right": "what is weather in sf->right"}, + ] + + +def test_conditional_entrypoint_to_multiple_state_graph( + snapshot: SnapshotAssertion, +) -> None: + class OverallState(TypedDict): + locations: list[str] + results: Annotated[list[str], operator.add] + + def get_weather(state: OverallState) 
-> OverallState: + location = state["location"] + weather = "sunny" if len(location) > 2 else "cloudy" + return {"results": [f"It's {weather} in {location}"]} + + def continue_to_weather(state: OverallState) -> list[Send]: + return [ + Send("get_weather", {"location": location}) + for location in state["locations"] + ] + + workflow = StateGraph(OverallState) + + workflow.add_node("get_weather", get_weather) + workflow.add_edge("get_weather", END) + workflow.set_conditional_entry_point(continue_to_weather) + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert app.invoke({"locations": ["sf", "nyc"]}, debug=True) == { + "locations": ["sf", "nyc"], + "results": ["It's cloudy in sf", "It's sunny in nyc"], + } + + assert [*app.stream({"locations": ["sf", "nyc"]}, stream_mode="values")][-1] == { + "locations": ["sf", "nyc"], + "results": ["It's cloudy in sf", "It's sunny in nyc"], + } + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_conditional_state_graph( + snapshot: SnapshotAssertion, + mocker: MockerFixture, + request: pytest.FixtureRequest, + checkpointer_name: str, +) -> None: + from langchain_core.language_models.fake import FakeStreamingListLLM + from langchain_core.prompts import PromptTemplate + from langchain_core.tools import tool + + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + setup = mocker.Mock() + teardown = mocker.Mock() + + @contextmanager + def assert_ctx_once() -> Iterator[None]: + assert setup.call_count == 0 + assert teardown.call_count == 0 + try: + yield + finally: + assert setup.call_count == 1 + assert teardown.call_count == 1 + setup.reset_mock() + 
teardown.reset_mock() + + @contextmanager + def make_httpx_client() -> Iterator[httpx.Client]: + setup() + with httpx.Client() as client: + try: + yield client + finally: + teardown() + + class AgentState(TypedDict, total=False): + input: Annotated[str, UntrackedValue] + agent_outcome: Optional[Union[AgentAction, AgentFinish]] + intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add] + session: Annotated[httpx.Client, Context(make_httpx_client)] + + class ToolState(TypedDict, total=False): + agent_outcome: Union[AgentAction, AgentFinish] + session: Annotated[httpx.Client, Context(make_httpx_client)] + + # Assemble the tools + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + # Construct the agent + prompt = PromptTemplate.from_template("Hello!") + + llm = FakeStreamingListLLM( + responses=[ + "tool:search_api:query", + "tool:search_api:another", + "finish:answer", + ] + ) + + def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]: + if input.startswith("finish"): + _, answer = input.split(":") + return { + "agent_outcome": AgentFinish( + return_values={"answer": answer}, log=input + ) + } + else: + _, tool_name, tool_input = input.split(":") + return { + "agent_outcome": AgentAction( + tool=tool_name, tool_input=tool_input, log=input + ) + } + + agent = prompt | llm | agent_parser + + # Define tool execution logic + def execute_tools(data: ToolState) -> dict: + # check session in data + assert isinstance(data["session"], httpx.Client) + assert "input" not in data + assert "intermediate_steps" not in data + # execute the tool + agent_action: AgentAction = data.pop("agent_outcome") + observation = {t.name: t for t in tools}[agent_action.tool].invoke( + agent_action.tool_input + ) + return {"intermediate_steps": [[agent_action, observation]]} + + # Define decision-making logic + def should_continue(data: AgentState) -> str: + # check 
session in data + assert isinstance(data["session"], httpx.Client) + # Logic to decide whether to continue in the loop or exit + if isinstance(data["agent_outcome"], AgentFinish): + return "exit" + else: + return "continue" + + # Define a new graph + workflow = StateGraph(AgentState) + + workflow.add_node("agent", agent) + workflow.add_node("tools", execute_tools, input=ToolState) + + workflow.set_entry_point("agent") + + workflow.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + + workflow.add_edge("tools", "agent") + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + with assert_ctx_once(): + assert app.invoke({"input": "what is weather in sf"}) == { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + + with assert_ctx_once(): + assert [*app.stream({"input": "what is weather in sf"})] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + { + "tools": { + 
"intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + } + }, + { + "agent": { + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + }, + ] + + # test state get/update methods with interrupt_after + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + with assert_ctx_once(): + assert [ + c + for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + with assert_ctx_once(): + app_w_interrupt.update_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "intermediate_steps": [], + }, + 
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + }, + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + with assert_ctx_once(): + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + {"__interrupt__": ()}, + ] + + with assert_ctx_once(): + app_w_interrupt.update_state( + config, + { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + }, + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": { + "agent": { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + } + }, + "thread_id": "1", 
+ }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # test state get/update methods with interrupt_before + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + debug=True, + ) + config = {"configurable": {"thread_id": "2"}} + llm.i = 0 # reset the llm + + assert [ + c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + 
metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + {"__interrupt__": ()}, + ] + + app_w_interrupt.update_state( + config, + { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + }, + ) + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + }, + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": { + "agent": { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # test w interrupt before all + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before="*", + debug=True, + ) + config = {"configurable": 
{"thread_id": "3"}} + llm.i = 0 # reset the llm + + assert [ + c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),), + next=("agent",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "3", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + "thread_id": "3", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) 
== StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + }, + tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),), + next=("agent",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + "thread_id": "3", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + {"__interrupt__": ()}, + ] + + # test w interrupt after all + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after="*", + ) + config = {"configurable": {"thread_id": "4"}} + llm.i = 0 # reset the llm + + assert [ + c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config) + ] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + 
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + "thread_id": "4", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + }, + tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),), + next=("agent",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + "thread_id": "4", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + {"__interrupt__": ()}, + ] + + +def test_conditional_state_graph_with_list_edge_inputs(snapshot: SnapshotAssertion): + class State(TypedDict): + foo: Annotated[list[str], operator.add] + + 
graph_builder = StateGraph(State) + graph_builder.add_node("A", lambda x: {"foo": ["A"]}) + graph_builder.add_node("B", lambda x: {"foo": ["B"]}) + graph_builder.add_edge(START, "A") + graph_builder.add_edge(START, "B") + graph_builder.add_edge(["A", "B"], END) + + app = graph_builder.compile() + assert app.invoke({"foo": []}) == {"foo": ["A", "B"]} + + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + +def test_state_graph_w_config_inherited_state_keys(snapshot: SnapshotAssertion) -> None: + from langchain_core.language_models.fake import FakeStreamingListLLM + from langchain_core.prompts import PromptTemplate + from langchain_core.tools import tool + + class BaseState(TypedDict): + input: str + agent_outcome: Optional[Union[AgentAction, AgentFinish]] + + class AgentState(BaseState, total=False): + intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add] + + assert get_type_hints(AgentState).keys() == { + "input", + "agent_outcome", + "intermediate_steps", + } + + class Config(TypedDict, total=False): + tools: list[str] + + # Assemble the tools + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + # Construct the agent + prompt = PromptTemplate.from_template("Hello!") + + llm = FakeStreamingListLLM( + responses=[ + "tool:search_api:query", + "tool:search_api:another", + "finish:answer", + ] + ) + + def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]: + if input.startswith("finish"): + _, answer = input.split(":") + return { + "agent_outcome": AgentFinish( + return_values={"answer": answer}, log=input + ) + } + else: + _, tool_name, tool_input = input.split(":") + return { + "agent_outcome": AgentAction( + tool=tool_name, tool_input=tool_input, log=input + ) + } + + agent = prompt | llm | agent_parser + + # Define tool execution logic + def 
execute_tools(data: AgentState) -> dict: + agent_action: AgentAction = data.pop("agent_outcome") + observation = {t.name: t for t in tools}[agent_action.tool].invoke( + agent_action.tool_input + ) + return {"intermediate_steps": [(agent_action, observation)]} + + # Define decision-making logic + def should_continue(data: AgentState) -> str: + # Logic to decide whether to continue in the loop or exit + if isinstance(data["agent_outcome"], AgentFinish): + return "exit" + else: + return "continue" + + # Define a new graph + builder = StateGraph(AgentState, Config) + + builder.add_node("agent", agent) + builder.add_node("tools", execute_tools) + + builder.set_entry_point("agent") + + builder.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + + builder.add_edge("tools", "agent") + + app = builder.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.config_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + + assert builder.channels.keys() == {"input", "agent_outcome", "intermediate_steps"} + + assert app.invoke({"input": "what is weather in sf"}) == { + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + "input": "what is weather in sf", + "intermediate_steps": [ + ( + AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "result for query", + ), + ( + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ), + ], + } + + +def test_conditional_entrypoint_graph_state(snapshot: SnapshotAssertion) -> None: + class AgentState(TypedDict, total=False): + input: str + output: str + steps: Annotated[list[str], operator.add] + + def left(data: AgentState) -> AgentState: + return {"output": data["input"] + "->left"} + + def right(data: 
AgentState) -> AgentState: + return {"output": data["input"] + "->right"} + + def should_start(data: AgentState) -> str: + assert data["steps"] == [], "Expected input to be read from the state" + # Logic to decide where to start + if len(data["input"]) > 10: + return "go-right" + else: + return "go-left" + + # Define a new graph + workflow = StateGraph(AgentState) + + workflow.add_node("left", left) + workflow.add_node("right", right) + + workflow.set_conditional_entry_point( + should_start, {"go-left": "left", "go-right": "right"} + ) + + workflow.add_conditional_edges("left", lambda data: END, {END: END}) + workflow.add_edge("right", END) + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert app.invoke({"input": "what is weather in sf"}) == { + "input": "what is weather in sf", + "output": "what is weather in sf->right", + "steps": [], + } + + assert [*app.stream({"input": "what is weather in sf"})] == [ + {"right": {"output": "what is weather in sf->right"}}, + ] + + +def test_prebuilt_tool_chat(snapshot: SnapshotAssertion) -> None: + from langchain_core.messages import AIMessage, HumanMessage + from langchain_core.tools import tool + + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + model = FakeChatModel( + messages=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third 
one"}, + }, + ], + ), + AIMessage(content="answer"), + ] + ) + + app = create_tool_calling_executor(model, tools) + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert app.invoke( + {"messages": [HumanMessage(content="what is weather in sf")]} + ) == { + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + id=AnyStr(), + ), + _AnyIdAIMessage(content="answer"), + ] + } + + assert [ + c + for c in app.stream( + {"messages": [HumanMessage(content="what is weather in sf")]}, + stream_mode="messages", + ) + ] == [ + ( + _AnyIdAIMessageChunk( + content="", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "query"}, + "id": "tool_call123", + "type": "tool_call", + } + ], + tool_call_chunks=[ + { + "name": "search_api", + "args": '{"query": "query"}', + "id": "tool_call123", + "index": None, + "type": "tool_call_chunk", + } + ], + ), + { + "langgraph_step": 1, + "langgraph_node": "agent", + "langgraph_triggers": ["start:agent"], + "langgraph_path": (PULL, "agent"), + 
"langgraph_checkpoint_ns": AnyStr("agent:"), + "checkpoint_ns": AnyStr("agent:"), + "ls_provider": "fakechatmodel", + "ls_model_type": "chat", + }, + ), + ( + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + { + "langgraph_step": 2, + "langgraph_node": "tools", + "langgraph_triggers": ["branch:agent:should_continue:tools"], + "langgraph_path": (PULL, "tools"), + "langgraph_checkpoint_ns": AnyStr("tools:"), + }, + ), + ( + _AnyIdAIMessageChunk( + content="", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "another"}, + "id": "tool_call234", + "type": "tool_call", + }, + { + "name": "search_api", + "args": {"query": "a third one"}, + "id": "tool_call567", + "type": "tool_call", + }, + ], + tool_call_chunks=[ + { + "name": "search_api", + "args": '{"query": "another"}', + "id": "tool_call234", + "index": None, + "type": "tool_call_chunk", + }, + { + "name": "search_api", + "args": '{"query": "a third one"}', + "id": "tool_call567", + "index": None, + "type": "tool_call_chunk", + }, + ], + ), + { + "langgraph_step": 3, + "langgraph_node": "agent", + "langgraph_triggers": ["tools"], + "langgraph_path": (PULL, "agent"), + "langgraph_checkpoint_ns": AnyStr("agent:"), + "checkpoint_ns": AnyStr("agent:"), + "ls_provider": "fakechatmodel", + "ls_model_type": "chat", + }, + ), + ( + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + { + "langgraph_step": 4, + "langgraph_node": "tools", + "langgraph_triggers": ["branch:agent:should_continue:tools"], + "langgraph_path": (PULL, "tools"), + "langgraph_checkpoint_ns": AnyStr("tools:"), + }, + ), + ( + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + { + "langgraph_step": 4, + "langgraph_node": "tools", + "langgraph_triggers": ["branch:agent:should_continue:tools"], + "langgraph_path": (PULL, "tools"), + "langgraph_checkpoint_ns": 
AnyStr("tools:"), + }, + ), + ( + _AnyIdAIMessageChunk( + content="answer", + ), + { + "langgraph_step": 5, + "langgraph_node": "agent", + "langgraph_triggers": ["tools"], + "langgraph_path": (PULL, "agent"), + "langgraph_checkpoint_ns": AnyStr("agent:"), + "checkpoint_ns": AnyStr("agent:"), + "ls_provider": "fakechatmodel", + "ls_model_type": "chat", + }, + ), + ] + + assert app.invoke( + {"messages": [HumanMessage(content="what is weather in sf")]}, + {"recursion_limit": 2}, + debug=True, + ) == { + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + _AnyIdAIMessage(content="Sorry, need more steps to process this request."), + ] + } + + model.i = 0 # reset the model + + assert ( + app.invoke( + {"messages": [HumanMessage(content="what is weather in sf")]}, + stream_mode="updates", + )[0]["agent"]["messages"] + == [ + { + "agent": { + "messages": [ + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + ] + } + }, + { + "tools": { + "messages": [ + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + } + }, + { + "agent": { + "messages": [ + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one"}, + }, + ], + ) + ] + } + }, + { + "tools": { + "messages": [ + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + ] + } + }, + {"agent": {"messages": [_AnyIdAIMessage(content="answer")]}}, + ][0]["agent"]["messages"] + ) + + assert [ + *app.stream({"messages": [HumanMessage(content="what is weather in sf")]}) + ] == [ + { + "agent": { + "messages": [ + _AnyIdAIMessage( + 
content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + ] + } + }, + { + "tools": { + "messages": [ + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + } + }, + { + "agent": { + "messages": [ + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one"}, + }, + ], + ) + ] + } + }, + { + "tools": { + "messages": [ + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + ] + } + }, + {"agent": {"messages": [_AnyIdAIMessage(content="answer")]}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_state_graph_packets( + request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture +) -> None: + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + ToolCall, + ToolMessage, + ) + from langchain_core.tools import tool + + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + class AgentState(TypedDict): + messages: Annotated[list[BaseMessage], add_messages] + session: Annotated[httpx.Client, Context(httpx.Client)] + + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + tools_by_name = {t.name: t for t in tools} + + model = FakeMessagesListChatModel( + responses=[ + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, 
+ ], + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + AIMessage(id="ai3", content="answer"), + ] + ) + + def agent(data: AgentState) -> AgentState: + assert isinstance(data["session"], httpx.Client) + return { + "messages": model.invoke(data["messages"]), + "something_extra": "hi there", + } + + # Define decision-making logic + def should_continue(data: AgentState) -> str: + assert isinstance(data["session"], httpx.Client) + assert ( + data["something_extra"] == "hi there" + ), "nodes can pass extra data to their cond edges, which isn't saved in state" + # Logic to decide whether to continue in the loop or exit + if tool_calls := data["messages"][-1].tool_calls: + return [Send("tools", tool_call) for tool_call in tool_calls] + else: + return END + + def tools_node(input: ToolCall, config: RunnableConfig) -> AgentState: + time.sleep(input["args"].get("idx", 0) / 10) + output = tools_by_name[input["name"]].invoke(input["args"], config) + return { + "messages": ToolMessage( + content=output, name=input["name"], tool_call_id=input["id"] + ) + } + + # Define a new graph + workflow = StateGraph(AgentState) + + # Define the two nodes we will cycle between + workflow.add_node("agent", agent) + workflow.add_node("tools", tools_node) + + # Set the entrypoint as `agent` + # This means that this node is the first one called + workflow.set_entry_point("agent") + + # We now add a conditional edge + workflow.add_conditional_edges("agent", should_continue) + + # We now add a normal edge from `tools` to `agent`. + # This means that after `tools` is called, `agent` node is called next. + workflow.add_edge("tools", "agent") + + # Finally, we compile it! 
+ # This compiles it into a LangChain Runnable, + # meaning you can use it as you would any other runnable + app = workflow.compile() + + assert app.invoke({"messages": HumanMessage(content="what is weather in sf")}) == { + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + AIMessage(content="answer", id="ai3"), + ] + } + + assert [ + c + for c in app.stream( + {"messages": [HumanMessage(content="what is weather in sf")]} + ) + ] == [ + { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + }, + }, + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + } + }, + { + "agent": { + "messages": AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ) + } + }, + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ) + }, + }, + { + 
"tools": { + "messages": _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + }, + }, + {"agent": {"messages": AIMessage(content="answer", id="ai3")}}, + ] + + # interrupt after agent + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + for c in app_w_interrupt.stream( + {"messages": HumanMessage(content="what is weather in sf")}, config + ) + ] == [ + { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + } + }, + {"__interrupt__": ()}, + ] + + if not FF_SEND_V2: + return + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai1", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "query"}, + "id": "tool_call123", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + ), + next=("tools",), + config=(app_w_interrupt.checkpointer.get_tuple(config)).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # modify ai message + last_message = 
(app_w_interrupt.get_state(config)).values["messages"][-1] + last_message.tool_calls[0]["args"]["query"] = "a different query" + app_w_interrupt.update_state( + config, {"messages": last_message, "something_extra": "hi there"} + ) + + # message was replaced instead of appended + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + ] + }, + tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + "something_extra": "hi there", + } + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + } + }, + { + "agent": { + "messages": AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ) + }, + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + 
{ + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai2", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "another", "idx": 0}, + "id": "tool_call234", + "type": "tool_call", + }, + { + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + "id": "tool_call567", + "type": "tool_call", + }, + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr()) + ), + ), + next=("tools", "tools"), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + }, + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + { + "messages": AIMessage(content="answer", id="ai2"), + "something_extra": "hi there", + }, + ) + + # replaces message even if object identity is different, as long as id is the same + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + 
_AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ] + }, + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 3, + "writes": { + "agent": { + "messages": AIMessage(content="answer", id="ai2"), + "something_extra": "hi there", + } + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # interrupt before tools + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + model.i = 0 + + assert [ + c + for c in app_w_interrupt.stream( + {"messages": HumanMessage(content="what is weather in sf")}, config + ) + ] == [ + { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + } + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai1", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "query"}, + "id": "tool_call123", + "type": 
"tool_call", + } + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + ), + next=("tools",), + config=(app_w_interrupt.checkpointer.get_tuple(config)).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # modify ai message + last_message = (app_w_interrupt.get_state(config)).values["messages"][-1] + last_message.tool_calls[0]["args"]["query"] = "a different query" + app_w_interrupt.update_state( + config, {"messages": last_message, "something_extra": "hi there"} + ) + + # message was replaced instead of appended + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + ] + }, + tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + "something_extra": "hi there", + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + } + }, + { + 
"agent": { + "messages": AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ) + }, + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai2", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "another", "idx": 0}, + "id": "tool_call234", + "type": "tool_call", + }, + { + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + "id": "tool_call567", + "type": "tool_call", + }, + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr()) + ), + ), + next=("tools", "tools"), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a 
different query", + name="search_api", + tool_call_id="tool_call123", + ), + }, + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + { + "messages": AIMessage(content="answer", id="ai2"), + "something_extra": "hi there", + }, + ) + + # replaces message even if object identity is different, as long as id is the same + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ] + }, + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 3, + "writes": { + "agent": { + "messages": AIMessage(content="answer", id="ai2"), + "something_extra": "hi there", + } + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_message_graph( + snapshot: SnapshotAssertion, + deterministic_uuids: MockerFixture, + request: pytest.FixtureRequest, + checkpointer_name: str, +) -> None: + from copy import deepcopy + + from langchain_core.callbacks import CallbackManagerForLLMRun + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + ) + from langchain_core.outputs import ChatGeneration, ChatResult + from langchain_core.tools import tool + + 
checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + class FakeFuntionChatModel(FakeMessagesListChatModel): + def bind_functions(self, functions: list): + return self + + def _generate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + response = deepcopy(self.responses[self.i]) + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + generation = ChatGeneration(message=response) + return ChatResult(generations=[generation]) + + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + model = FakeFuntionChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + AIMessage(content="answer", id="ai3"), + ] + ) + + # Define the function that determines whether to continue or not + def should_continue(messages): + last_message = messages[-1] + # If there is no function call, then we finish + if not last_message.tool_calls: + return "end" + # Otherwise if there is, we continue + else: + return "continue" + + # Define a new graph + workflow = MessageGraph() + + # Define the two nodes we will cycle between + workflow.add_node("agent", model) + workflow.add_node("tools", ToolNode(tools)) + + # Set the entrypoint as `agent` + # This means that this node is the first one called + workflow.set_entry_point("agent") + + # We now add a conditional edge + workflow.add_conditional_edges( + # First, we define the start node. We use `agent`. + # This means these are the edges taken after the `agent` node is called. 
+ "agent", + # Next, we pass in the function that will determine which node is called next. + should_continue, + # Finally we pass in a mapping. + # The keys are strings, and the values are other nodes. + # END is a special node marking that the graph should finish. + # What will happen is we will call `should_continue`, and then the output of that + # will be matched against the keys in this mapping. + # Based on which one it matches, that node will then be called. + { + # If `tools`, then we call the tool node. + "continue": "tools", + # Otherwise we finish. + "end": END, + }, + ) + + # We now add a normal edge from `tools` to `agent`. + # This means that after `tools` is called, `agent` node is called next. + workflow.add_edge("tools", "agent") + + # Finally, we compile it! + # This compiles it into a LangChain Runnable, + # meaning you can use it as you would any other runnable + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot + assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert app.invoke(HumanMessage(content="what is weather in sf")) == [ + _AnyIdHumanMessage( + content="what is weather in sf", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", # respects ids passed in + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call456", + ), + AIMessage(content="answer", id="ai3"), + ] + + assert 
[*app.stream([HumanMessage(content="what is weather in sf")])] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + { + "tools": [ + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + { + "tools": [ + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call456", + ) + ] + }, + {"agent": AIMessage(content="answer", id="ai3")}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c for c in app_w_interrupt.stream(("human", "what is weather in sf"), config) + ] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "1", + }, + 
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # modify ai message + last_message = app_w_interrupt.get_state(config).values[-1] + last_message.tool_calls[0]["args"] = {"query": "a different query"} + next_config = app_w_interrupt.update_state(config, last_message) + + # message was replaced instead of appended + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=next_config, + created_at=AnyStr(), + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": [ + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + content="", + tool_calls=[ + 
{ + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + AIMessage(content="answer", id="ai2"), # replace existing message + ) + + # replaces message even if object identity is different, as long as id is the same + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ], + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": {"agent": AIMessage(content="answer", id="ai2")}, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + model.i = 0 # reset the llm + + assert [c for c in app_w_interrupt.stream("what is weather in sf", 
config)] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # modify ai message + last_message = app_w_interrupt.get_state(config).values[-1] + last_message.tool_calls[0]["args"] = {"query": "a different query"} + app_w_interrupt.update_state(config, last_message) + + # message was replaced instead of appended + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + 
"name": "search_api", + "args": {"query": "a different query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": [ + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + AIMessage(content="answer", id="ai2"), + ) + + # replaces message even if object identity is different, as long as id is the same + assert app_w_interrupt.get_state(config) == StateSnapshot( + 
values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + id=AnyStr(), + ), + AIMessage(content="answer", id="ai2"), + ], + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": {"agent": AIMessage(content="answer", id="ai2")}, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # add an extra message as if it came from "tools" node + app_w_interrupt.update_state(config, ("ai", "an extra message"), as_node="tools") + + # extra message is coerced BaseMessge and appended + # now the next node is "agent" per the graph edges + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + id=AnyStr(), + ), + AIMessage(content="answer", id="ai2"), + _AnyIdAIMessage(content="an extra message"), + ], + tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),), + next=("agent",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 6, + "writes": {"tools": UnsortedSequence("ai", "an extra message")}, + "thread_id": "2", + }, + 
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_root_graph( + deterministic_uuids: MockerFixture, + request: pytest.FixtureRequest, + checkpointer_name: str, +) -> None: + from copy import deepcopy + + from langchain_core.callbacks import CallbackManagerForLLMRun + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + ) + from langchain_core.outputs import ChatGeneration, ChatResult + from langchain_core.tools import tool + + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + class FakeFuntionChatModel(FakeMessagesListChatModel): + def bind_functions(self, functions: list): + return self + + def _generate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + response = deepcopy(self.responses[self.i]) + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + generation = ChatGeneration(message=response) + return ChatResult(generations=[generation]) + + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + model = FakeFuntionChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + AIMessage(content="answer", id="ai3"), + ] + ) + + # Define the function that determines whether to continue or not + def should_continue(messages): + last_message = messages[-1] + # If there is no 
function call, then we finish + if not last_message.tool_calls: + return "end" + # Otherwise if there is, we continue + else: + return "continue" + + class State(TypedDict): + __root__: Annotated[list[BaseMessage], add_messages] + + # Define a new graph + workflow = StateGraph(State) + + # Define the two nodes we will cycle between + workflow.add_node("agent", model) + workflow.add_node("tools", ToolNode(tools)) + + # Set the entrypoint as `agent` + # This means that this node is the first one called + workflow.set_entry_point("agent") + + # We now add a conditional edge + workflow.add_conditional_edges( + # First, we define the start node. We use `agent`. + # This means these are the edges taken after the `agent` node is called. + "agent", + # Next, we pass in the function that will determine which node is called next. + should_continue, + # Finally we pass in a mapping. + # The keys are strings, and the values are other nodes. + # END is a special node marking that the graph should finish. + # What will happen is we will call `should_continue`, and then the output of that + # will be matched against the keys in this mapping. + # Based on which one it matches, that node will then be called. + { + # If `tools`, then we call the tool node. + "continue": "tools", + # Otherwise we finish. + "end": END, + }, + ) + + # We now add a normal edge from `tools` to `agent`. + # This means that after `tools` is called, `agent` node is called next. + workflow.add_edge("tools", "agent") + + # Finally, we compile it! 
+ # This compiles it into a LangChain Runnable, + # meaning you can use it as you would any other runnable + app = workflow.compile() + + assert app.invoke(HumanMessage(content="what is weather in sf")) == [ + _AnyIdHumanMessage( + content="what is weather in sf", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", # respects ids passed in + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call456", + ), + AIMessage(content="answer", id="ai3"), + ] + + assert [*app.stream([HumanMessage(content="what is weather in sf")])] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + { + "tools": [ + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + # id="00000000-0000-4000-8000-000000000033", # TODO - We are off by one because pymongo.synchronous.client_session._ServerSession.session_id uses uuid4 + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + { + "tools": [ + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call456", + # id="00000000-0000-4000-8000-000000000041", + ) + ] + }, + {"agent": AIMessage(content="answer", id="ai3")}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c for c in 
app_w_interrupt.stream(("human", "what is weather in sf"), config) + ] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # modify ai message + last_message = app_w_interrupt.get_state(config).values[-1] + last_message.tool_calls[0]["args"] = {"query": "a different query"} + next_config = app_w_interrupt.update_state(config, last_message) + + # message was replaced instead of appended + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=next_config, + created_at=AnyStr(), + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + 
"args": {"query": "a different query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": [ + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + id=AnyStr(), + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + AIMessage(content="answer", id="ai2"), # replace existing message + ) + + # replaces message even if object identity is different, as long as id is the same + assert app_w_interrupt.get_state(config) == 
StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + id=AnyStr(), + ), + AIMessage(content="answer", id="ai2"), + ], + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": {"agent": AIMessage(content="answer", id="ai2")}, + "thread_id": "1", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + model.i = 0 # reset the llm + + assert [c for c in app_w_interrupt.stream("what is weather in sf", config)] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": 
"query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # modify ai message + last_message = app_w_interrupt.get_state(config).values[-1] + last_message.tool_calls[0]["args"] = {"query": "a different query"} + app_w_interrupt.update_state(config, last_message) + + # message was replaced instead of appended + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + { + "tools": [ + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + {"__interrupt__": ()}, + ] + + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + 
_AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + id=AnyStr(), + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + app_w_interrupt.update_state( + config, + AIMessage(content="answer", id="ai2"), + ) + + # replaces message even if object identity is different, as long as id is the same + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ], + tasks=(), + next=(), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": {"agent": AIMessage(content="answer", id="ai2")}, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # add an extra message as if it came from "tools" node + app_w_interrupt.update_state(config, 
("ai", "an extra message"), as_node="tools") + + # extra message is coerced BaseMessge and appended + # now the next node is "agent" per the graph edges + assert app_w_interrupt.get_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + id=AnyStr(), + ), + AIMessage(content="answer", id="ai2"), + _AnyIdAIMessage(content="an extra message"), + ], + tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),), + next=("agent",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 6, + "writes": {"tools": UnsortedSequence("ai", "an extra message")}, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # create new graph with one more state key, reuse previous thread history + + def simple_add(left, right): + if not isinstance(right, list): + right = [right] + return left + right + + class MoreState(TypedDict): + __root__: Annotated[list[BaseMessage], simple_add] + something_else: str + + # Define a new graph + new_workflow = StateGraph(MoreState) + new_workflow.add_node( + "agent", RunnableMap(__root__=RunnablePick("__root__") | model) + ) + new_workflow.add_node( + "tools", RunnableMap(__root__=RunnablePick("__root__") | ToolNode(tools)) + ) + new_workflow.set_entry_point("agent") + new_workflow.add_conditional_edges( + "agent", + RunnablePick("__root__") | should_continue, + { + # If `tools`, then we call the tool node. + "continue": "tools", + # Otherwise we finish. 
+ "end": END, + }, + ) + new_workflow.add_edge("tools", "agent") + new_app = new_workflow.compile(checkpointer=checkpointer) + model.i = 0 # reset the llm + + # previous state is converted to new schema + assert new_app.get_state(config) == StateSnapshot( + values={ + "__root__": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + _AnyIdAIMessage(content="an extra message"), + ] + }, + tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),), + next=("agent",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 6, + "writes": {"tools": UnsortedSequence("ai", "an extra message")}, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + # new input is merged to old state + assert new_app.invoke( + { + "__root__": [HumanMessage(content="what is weather in la")], + "something_else": "value", + }, + config, + interrupt_before=["agent"], + ) == { + "__root__": [ + _AnyIdHumanMessage( + content="what is weather in sf", + # id="00000000-0000-4000-8000-000000000070", + ), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "a different query"}, + "id": "tool_call123", + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + _AnyIdAIMessage( + content="an extra message", + # id="00000000-0000-4000-8000-000000000092" + ), + HumanMessage(content="what is weather in la"), + ], + 
"something_else": "value", + } + + +def test_in_one_fan_out_out_one_graph_state() -> None: + def sorted_add(x: list[str], y: list[str]) -> list[str]: + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def retriever_one(data: State) -> State: + # timer ensures stream output order is stable + # also, it confirms that the update order is not dependent on finishing order + # instead being defined by the order of the nodes/edges in the graph definition + # ie. stable between invocations + time.sleep(0.1) + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge("retriever_one", "qa") + workflow.add_edge("retriever_two", "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert app.invoke({"query": "what is weather in sf"}) == { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [*app.stream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + assert [*app.stream({"query": "what is weather in sf"}, stream_mode="values")] == [ + {"query": "what is weather in 
sf", "docs": []}, + {"query": "query: what is weather in sf", "docs": []}, + { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + }, + ] + + assert [ + *app.stream( + {"query": "what is weather in sf"}, + stream_mode=["values", "updates", "debug"], + ) + ] == [ + ("values", {"query": "what is weather in sf", "docs": []}), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "rewrite_query", + "input": {"query": "what is weather in sf", "docs": []}, + "triggers": ["start:rewrite_query"], + }, + }, + ), + ("updates", {"rewrite_query": {"query": "query: what is weather in sf"}}), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "rewrite_query", + "result": [("query", "query: what is weather in sf")], + "error": None, + "interrupts": [], + }, + }, + ), + ("values", {"query": "query: what is weather in sf", "docs": []}), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_one", + "input": {"query": "query: what is weather in sf", "docs": []}, + "triggers": ["rewrite_query"], + }, + }, + ), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_two", + "input": {"query": "query: what is weather in sf", "docs": []}, + "triggers": ["rewrite_query"], + }, + }, + ), + ( + "updates", + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + ), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_two", + "result": [("docs", ["doc3", "doc4"])], + "error": None, + "interrupts": [], + }, + }, + ), + ( + "updates", + {"retriever_one": {"docs": ["doc1", 
"doc2"]}}, + ), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_one", + "result": [("docs", ["doc1", "doc2"])], + "error": None, + "interrupts": [], + }, + }, + ), + ( + "values", + { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + ), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "qa", + "input": { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + "triggers": ["retriever_one", "retriever_two"], + }, + }, + ), + ("updates", {"qa": {"answer": "doc1,doc2,doc3,doc4"}}), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "qa", + "result": [("answer", "doc1,doc2,doc3,doc4")], + "error": None, + "interrupts": [], + }, + }, + ), + ( + "values", + { + "query": "query: what is weather in sf", + "answer": "doc1,doc2,doc3,doc4", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + ), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_dynamic_interrupt( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + tool_two_node_count = 0 + + def tool_two_node(s: State) -> State: + nonlocal tool_two_node_count + tool_two_node_count += 1 + if s["market"] == "DE": + answer = interrupt("Just because...") + else: + answer = " all good" + return {"my_key": answer} + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy()) + tool_two_graph.add_edge(START, "tool_two") + tool_two = tool_two_graph.compile() + + tracer = FakeTracer() + assert tool_two.invoke( + {"my_key": "value", "market": "DE"}, {"callbacks": 
[tracer]} + ) == { + "my_key": "value", + "market": "DE", + } + assert tool_two_node_count == 1, "interrupts aren't retried" + assert len(tracer.runs) == 1 + run = tracer.runs[0] + assert run.end_time is not None + assert run.error is None + assert run.outputs == {"market": "DE", "my_key": "value"} + + assert tool_two.invoke({"my_key": "value", "market": "US"}) == { + "my_key": "value all good", + "market": "US", + } + + tool_two = tool_two_graph.compile(checkpointer=checkpointer) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + tool_two.invoke({"my_key": "value", "market": "DE"}) + + # flow: interrupt -> resume with answer + thread2 = {"configurable": {"thread_id": "2"}} + # stop when about to enter node + assert [ + c for c in tool_two.stream({"my_key": "value ⛰️", "market": "DE"}, thread2) + ] == [ + { + "__interrupt__": ( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ) + }, + ] + # resume with answer + assert [c for c in tool_two.stream(Command(resume=" my answer"), thread2)] == [ + {"tool_two": {"my_key": " my answer"}}, + ] + + # flow: interrupt -> clear tasks + thread1 = {"configurable": {"thread_id": "1"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == { + "my_key": "value ⛰️", + "market": "DE", + } + assert [c.metadata for c in tool_two.checkpointer.list(thread1)] == [ + { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}}, + "thread_id": "1", + }, + ] + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️", "market": "DE"}, + next=("tool_two",), + tasks=( + PregelTask( + AnyStr(), + "tool_two", + (PULL, "tool_two"), + interrupts=( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ), + 
), + ), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + # clear the interrupt and next tasks + tool_two.update_state(thread1, None, as_node=END) + # interrupt and next tasks are cleared + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️", "market": "DE"}, + next=(), + tasks=(), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": {}, + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + +@pytest.mark.skipif(not FF_SEND_V2, reason="send v2 is not enabled") +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_copy_checkpoint( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + def tool_one(s: State) -> State: + return {"my_key": " one"} + + tool_two_node_count = 0 + + def tool_two_node(s: State) -> State: + nonlocal tool_two_node_count + tool_two_node_count += 1 + if s["market"] == "DE": + answer = interrupt("Just because...") + else: + answer = " all good" + return {"my_key": answer} + + def start(state: State) -> list[Union[Send, str]]: + return ["tool_two", Send("tool_one", state)] + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy()) + tool_two_graph.add_node("tool_one", tool_one) + tool_two_graph.set_conditional_entry_point(start) + tool_two = tool_two_graph.compile() + + tracer = FakeTracer() + 
assert tool_two.invoke( + {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]} + ) == { + "my_key": "value one", + "market": "DE", + } + assert tool_two_node_count == 1, "interrupts aren't retried" + assert len(tracer.runs) == 1 + run = tracer.runs[0] + assert run.end_time is not None + assert run.error is None + assert run.outputs == {"market": "DE", "my_key": "value one"} + + assert tool_two.invoke({"my_key": "value", "market": "US"}) == { + "my_key": "value one all good", + "market": "US", + } + + tool_two = tool_two_graph.compile(checkpointer=checkpointer) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + tool_two.invoke({"my_key": "value", "market": "DE"}) + + # flow: interrupt -> resume with answer + thread2 = {"configurable": {"thread_id": "2"}} + # stop when about to enter node + assert [ + c for c in tool_two.stream({"my_key": "value ⛰️", "market": "DE"}, thread2) + ] == [ + { + "tool_one": {"my_key": " one"}, + }, + { + "__interrupt__": ( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ) + }, + ] + # resume with answer + assert [c for c in tool_two.stream(Command(resume=" my answer"), thread2)] == [ + {"tool_two": {"my_key": " my answer"}}, + ] + + # flow: interrupt -> clear tasks + thread1 = {"configurable": {"thread_id": "1"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == { + "my_key": "value ⛰️ one", + "market": "DE", + } + assert [c.metadata for c in tool_two.checkpointer.list(thread1)] == [ + { + "parents": {}, + "source": "loop", + "step": 0, + "writes": {"tool_one": {"my_key": " one"}}, + "thread_id": "1", + }, + { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}}, + "thread_id": "1", + }, + ] + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️ one", "market": "DE"}, + next=("tool_two",), + tasks=( + 
PregelTask( + AnyStr(), + "tool_two", + (PULL, "tool_two"), + interrupts=( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ), + ), + ), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": {"tool_one": {"my_key": " one"}}, + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + # clear the interrupt and next tasks + tool_two.update_state(thread1, None) + # interrupt is cleared, next task is kept + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️ one", "market": "DE"}, + next=("tool_two",), + tasks=( + PregelTask( + AnyStr(), + "tool_two", + (PULL, "tool_two"), + interrupts=(), + ), + ), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": {}, + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_start_branch_then( + snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + shared: Annotated[dict[str, dict[str, Any]], SharedValue.on("assistant_id")] + + def assert_shared_value(data: State, config: RunnableConfig) -> State: + assert "shared" in data + if thread_id := config["configurable"].get("thread_id"): + if thread_id == "1": + # this is the first thread, so should not see a value + assert data["shared"] == {} + return {"shared": {"1": {"hello": "world"}}} + elif thread_id == "2": + # this should get value 
saved by thread 1 + assert data["shared"] == {"1": {"hello": "world"}} + elif thread_id == "3": + # this is a different assistant, so should not see previous value + assert data["shared"] == {} + return {} + + def tool_two_slow(data: State, config: RunnableConfig) -> State: + return {"my_key": " slow", **assert_shared_value(data, config)} + + def tool_two_fast(data: State, config: RunnableConfig) -> State: + return {"my_key": " fast", **assert_shared_value(data, config)} + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two_slow", tool_two_slow) + tool_two_graph.add_node("tool_two_fast", tool_two_fast) + tool_two_graph.set_conditional_entry_point( + lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", then=END + ) + tool_two = tool_two_graph.compile() + assert tool_two.get_graph().draw_mermaid() == snapshot + + assert tool_two.invoke({"my_key": "value", "market": "DE"}) == { + "my_key": "value slow", + "market": "DE", + } + assert tool_two.invoke({"my_key": "value", "market": "US"}) == { + "my_key": "value fast", + "market": "US", + } + + tool_two = tool_two_graph.compile( + store=InMemoryStore(), + checkpointer=checkpointer, + interrupt_before=["tool_two_fast", "tool_two_slow"], + ) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + tool_two.invoke({"my_key": "value", "market": "DE"}) + + thread1 = {"configurable": {"thread_id": "1", "assistant_id": "a"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == { + "my_key": "value ⛰️", + "market": "DE", + } + assert [c.metadata for c in tool_two.checkpointer.list(thread1)] == [ + { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "a", + "thread_id": "1", + }, + { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}}, + "assistant_id": "a", + "thread_id": "1", + }, + ] + assert 
tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "a", + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread1, debug=1) == { + "my_key": "value ⛰️ slow", + "market": "DE", + } + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️ slow", "market": "DE"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"tool_two_slow": {"my_key": " slow"}}, + "assistant_id": "a", + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + thread2 = {"configurable": {"thread_id": "2", "assistant_id": "a"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "US"}, thread2) == { + "my_key": "value", + "market": "US", + } + assert tool_two.get_state(thread2) == StateSnapshot( + values={"my_key": "value", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=tool_two.checkpointer.get_tuple(thread2).config, + created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "a", + "thread_id": "2", + }, + parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config, + ) + # resume, for same result as above + assert 
tool_two.invoke(None, thread2, debug=1) == { + "my_key": "value fast", + "market": "US", + } + assert tool_two.get_state(thread2) == StateSnapshot( + values={"my_key": "value fast", "market": "US"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread2).config, + created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"tool_two_fast": {"my_key": " fast"}}, + "assistant_id": "a", + "thread_id": "2", + }, + parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config, + ) + + thread3 = {"configurable": {"thread_id": "3", "assistant_id": "b"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "US"}, thread3) == { + "my_key": "value", + "market": "US", + } + assert tool_two.get_state(thread3) == StateSnapshot( + values={"my_key": "value", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=tool_two.checkpointer.get_tuple(thread3).config, + created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "b", + "thread_id": "3", + }, + parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config, + ) + # update state + tool_two.update_state(thread3, {"my_key": "key"}) # appends to my_key + assert tool_two.get_state(thread3) == StateSnapshot( + values={"my_key": "valuekey", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=tool_two.checkpointer.get_tuple(thread3).config, + created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": {START: {"my_key": "key"}}, + "assistant_id": "b", + "thread_id": "3", + }, + parent_config=[*tool_two.checkpointer.list(thread3, 
limit=2)][-1].config, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread3, debug=1) == { + "my_key": "valuekey fast", + "market": "US", + } + assert tool_two.get_state(thread3) == StateSnapshot( + values={"my_key": "valuekey fast", "market": "US"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread3).config, + created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": {"tool_two_fast": {"my_key": " fast"}}, + "assistant_id": "b", + "thread_id": "3", + }, + parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_branch_then( + snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + tool_two_graph = StateGraph(State) + tool_two_graph.set_entry_point("prepare") + tool_two_graph.set_finish_point("finish") + tool_two_graph.add_conditional_edges( + source="prepare", + path=lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", + then="finish", + ) + tool_two_graph.add_node("prepare", lambda s: {"my_key": " prepared"}) + tool_two_graph.add_node("tool_two_slow", lambda s: {"my_key": " slow"}) + tool_two_graph.add_node("tool_two_fast", lambda s: {"my_key": " fast"}) + tool_two_graph.add_node("finish", lambda s: {"my_key": " finished"}) + tool_two = tool_two_graph.compile() + assert tool_two.get_graph().draw_mermaid(with_styles=False) == snapshot + assert tool_two.get_graph().draw_mermaid() == snapshot + + assert tool_two.invoke({"my_key": "value", "market": "DE"}, debug=1) == { + "my_key": "value prepared slow finished", + "market": "DE", + } + assert tool_two.invoke({"my_key": "value", "market": "US"}) == { + "my_key": 
"value prepared fast finished", + "market": "US", + } + + # test stream_mode=debug + tool_two = tool_two_graph.compile(checkpointer=checkpointer) + thread10 = {"configurable": {"thread_id": "10"}} + + res = [ + *tool_two.stream( + {"my_key": "value", "market": "DE"}, thread10, stream_mode="debug" + ) + ] + + assert res == [ + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": -1, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": {"my_key": ""}, + "metadata": { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value", "market": "DE"}}, + "thread_id": "10", + }, + "parent_config": None, + "next": ["__start__"], + "tasks": [ + { + "id": AnyStr(), + "name": "__start__", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["prepare"], + "tasks": [ + {"id": AnyStr(), "name": "prepare", "interrupts": (), "state": None} + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "prepare", + "input": {"my_key": "value", "market": "DE"}, + "triggers": ["start:prepare"], + }, + }, + { + "type": 
"task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "prepare", + "result": [("my_key", " prepared")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value prepared", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["tool_two_slow"], + "tasks": [ + { + "id": AnyStr(), + "name": "tool_two_slow", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "tool_two_slow", + "input": {"my_key": "value prepared", "market": "DE"}, + "triggers": ["branch:prepare:condition:tool_two_slow"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "tool_two_slow", + "result": [("my_key", " slow")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value prepared slow", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 2, + "writes": {"tool_two_slow": {"my_key": " slow"}}, + 
"thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["finish"], + "tasks": [ + {"id": AnyStr(), "name": "finish", "interrupts": (), "state": None} + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "finish", + "input": {"my_key": "value prepared slow", "market": "DE"}, + "triggers": ["branch:prepare:condition::then"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "finish", + "result": [("my_key", " finished")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value prepared slow finished", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": [], + "tasks": [], + }, + }, + ] + + tool_two = tool_two_graph.compile( + checkpointer=checkpointer, interrupt_before=["tool_two_fast", "tool_two_slow"] + ) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + tool_two.invoke({"my_key": "value", "market": "DE"}) + + thread1 = {"configurable": {"thread_id": "1"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "DE"}, thread1) == { + 
"my_key": "value prepared", + "market": "DE", + } + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value prepared", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread1, debug=1) == { + "my_key": "value prepared slow finished", + "market": "DE", + } + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value prepared slow finished", "market": "DE"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "1", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + thread2 = {"configurable": {"thread_id": "2"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "US"}, thread2) == { + "my_key": "value prepared", + "market": "US", + } + assert tool_two.get_state(thread2) == StateSnapshot( + values={"my_key": "value prepared", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=tool_two.checkpointer.get_tuple(thread2).config, + created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "2", + }, + 
parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread2, debug=1) == { + "my_key": "value prepared fast finished", + "market": "US", + } + assert tool_two.get_state(thread2) == StateSnapshot( + values={"my_key": "value prepared fast finished", "market": "US"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread2).config, + created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "2", + }, + parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config, + ) + + tool_two = tool_two_graph.compile( + checkpointer=checkpointer, interrupt_before=["finish"] + ) + + thread1 = {"configurable": {"thread_id": "11"}} + + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "DE"}, thread1) == { + "my_key": "value prepared slow", + "market": "DE", + } + assert tool_two.get_state(thread1) == StateSnapshot( + values={ + "my_key": "value prepared slow", + "market": "DE", + }, + tasks=(PregelTask(AnyStr(), "finish", (PULL, "finish")),), + next=("finish",), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": {"tool_two_slow": {"my_key": " slow"}}, + "thread_id": "11", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + # update state + tool_two.update_state(thread1, {"my_key": "er"}) + assert tool_two.get_state(thread1) == StateSnapshot( + values={ + "my_key": "value prepared slower", + "market": "DE", + }, + tasks=(PregelTask(AnyStr(), "finish", (PULL, "finish")),), + next=("finish",), + config=tool_two.checkpointer.get_tuple(thread1).config, + 
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 3, + "writes": {"tool_two_slow": {"my_key": "er"}}, + "thread_id": "11", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + tool_two = tool_two_graph.compile( + checkpointer=checkpointer, interrupt_after=["prepare"] + ) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + tool_two.invoke({"my_key": "value", "market": "DE"}) + + thread1 = {"configurable": {"thread_id": "21"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "DE"}, thread1) == { + "my_key": "value prepared", + "market": "DE", + } + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value prepared", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "21", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread1, debug=1) == { + "my_key": "value prepared slow finished", + "market": "DE", + } + assert tool_two.get_state(thread1) == StateSnapshot( + values={"my_key": "value prepared slow finished", "market": "DE"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread1).config, + created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "21", + }, + parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config, + ) + + thread2 = {"configurable": {"thread_id": 
"22"}} + # stop when about to enter node + assert tool_two.invoke({"my_key": "value", "market": "US"}, thread2) == { + "my_key": "value prepared", + "market": "US", + } + assert tool_two.get_state(thread2) == StateSnapshot( + values={"my_key": "value prepared", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=tool_two.checkpointer.get_tuple(thread2).config, + created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "22", + }, + parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread2, debug=1) == { + "my_key": "value prepared fast finished", + "market": "US", + } + assert tool_two.get_state(thread2) == StateSnapshot( + values={"my_key": "value prepared fast finished", "market": "US"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread2).config, + created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "22", + }, + parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config, + ) + + thread3 = {"configurable": {"thread_id": "23"}} + # update an empty thread before first run + uconfig = tool_two.update_state(thread3, {"my_key": "key", "market": "DE"}) + # check current state + assert tool_two.get_state(thread3) == StateSnapshot( + values={"my_key": "key", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "prepare", (PULL, "prepare")),), + next=("prepare",), + config=uconfig, + created_at=AnyStr(), + metadata={ + "parents": {}, + "source": "update", + "step": 0, + "writes": {START: {"my_key": "key", "market": "DE"}}, + "thread_id": "23", + }, + parent_config=None, + ) + # run from this 
point + assert tool_two.invoke(None, thread3) == { + "my_key": "key prepared", + "market": "DE", + } + # get state after first node + assert tool_two.get_state(thread3) == StateSnapshot( + values={"my_key": "key prepared", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=tool_two.checkpointer.get_tuple(thread3).config, + created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "23", + }, + parent_config=uconfig, + ) + # resume, for same result as above + assert tool_two.invoke(None, thread3, debug=1) == { + "my_key": "key prepared slow finished", + "market": "DE", + } + assert tool_two.get_state(thread3) == StateSnapshot( + values={"my_key": "key prepared slow finished", "market": "DE"}, + tasks=(), + next=(), + config=tool_two.checkpointer.get_tuple(thread3).config, + created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "23", + }, + parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_in_one_fan_out_state_graph_waiting_edge( + snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + workflow = StateGraph(State) + + 
@workflow.add_node + def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def analyzer_one(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.1) # to ensure stream order + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + workflow.add_node(analyzer_one) + workflow.add_node(retriever_one) + workflow.add_node(retriever_two) + workflow.add_node(qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert app.invoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [*app.stream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", 
"doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["qa"], + ) + config = {"configurable": {"thread_id": "2"}} + + assert [ + c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + app_w_interrupt.update_state(config, {"docs": ["doc5"]}) + assert app_w_interrupt.get_state(config) == StateSnapshot( + values={ + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4", "doc5"], + }, + tasks=(PregelTask(AnyStr(), "qa", (PULL, "qa")),), + next=("qa",), + config=app_w_interrupt.checkpointer.get_tuple(config).config, + created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 4, + "writes": {"retriever_one": {"docs": ["doc5"]}}, + "thread_id": "2", + }, + parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config, + ) + + assert [c for c in app_w_interrupt.stream(None, config, debug=1)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4,doc5"}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_in_one_fan_out_state_graph_waiting_edge_via_branch( + snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + 
for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def analyzer_one(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + def rewrite_query_then(data: State) -> Literal["retriever_two"]: + return "retriever_two" + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_conditional_edges("rewrite_query", rewrite_query_then) + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + assert app.invoke({"query": "what is weather in sf"}, debug=True) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [*app.stream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + 
app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1( + snapshot: SnapshotAssertion, + mocker: MockerFixture, + request: pytest.FixtureRequest, + checkpointer_name: str, +) -> None: + from pydantic.v1 import BaseModel, ValidationError + + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + setup = mocker.Mock() + teardown = mocker.Mock() + + @contextmanager + def assert_ctx_once() -> Iterator[None]: + assert setup.call_count == 0 + assert teardown.call_count == 0 + try: + yield + finally: + assert setup.call_count == 1 + assert teardown.call_count == 1 + setup.reset_mock() + teardown.reset_mock() + + @contextmanager + def make_httpx_client() -> Iterator[httpx.Client]: + setup() + with httpx.Client() as client: + try: + yield client + finally: + teardown() + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class InnerObject(BaseModel): + yo: int + + class State(BaseModel): + class Config: + arbitrary_types_allowed = True + + query: str + inner: InnerObject + answer: Optional[str] = None + docs: Annotated[list[str], sorted_add] + client: 
Annotated[httpx.Client, Context(make_httpx_client)] + + class Input(BaseModel): + query: str + inner: InnerObject + + class Output(BaseModel): + answer: str + docs: list[str] + + class StateUpdate(BaseModel): + query: Optional[str] = None + answer: Optional[str] = None + docs: Optional[list[str]] = None + + def rewrite_query(data: State) -> State: + return {"query": f"query: {data.query}"} + + def analyzer_one(data: State) -> State: + return StateUpdate(query=f"analyzed: {data.query}") + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data.docs)} + + def decider(data: State) -> str: + assert isinstance(data, State) + return "retriever_two" + + workflow = StateGraph(State, input=Input, output=Output) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_conditional_edges( + "rewrite_query", decider, {"retriever_two": "retriever_two"} + ) + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + assert app.get_input_jsonschema() == snapshot + assert app.get_output_jsonschema() == snapshot + + with pytest.raises(ValidationError), assert_ctx_once(): + app.invoke({"query": {}}) + + with assert_ctx_once(): + assert app.invoke({"query": "what is weather in sf", "inner": {"yo": 1}}) == { + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + with assert_ctx_once(): + 
assert [ + *app.stream({"query": "what is weather in sf", "inner": {"yo": 1}}) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + with assert_ctx_once(): + assert [ + c + for c in app_w_interrupt.stream( + {"query": "what is weather in sf", "inner": {"yo": 1}}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + with assert_ctx_once(): + assert [c for c in app_w_interrupt.stream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + with assert_ctx_once(): + assert app_w_interrupt.update_state( + config, {"docs": ["doc5"]}, as_node="rewrite_query" + ) == { + "configurable": { + "thread_id": "1", + "checkpoint_id": AnyStr(), + "checkpoint_ns": "", + } + } + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2( + snapshot: SnapshotAssertion, + mocker: MockerFixture, + request: pytest.FixtureRequest, + checkpointer_name: str, +) -> None: + from pydantic import BaseModel, ConfigDict, ValidationError + + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + setup = mocker.Mock() + teardown = mocker.Mock() + + @contextmanager + def assert_ctx_once() -> Iterator[None]: + assert setup.call_count == 0 + assert teardown.call_count == 0 + try: + yield + finally: + assert setup.call_count == 1 + assert 
teardown.call_count == 1 + setup.reset_mock() + teardown.reset_mock() + + @contextmanager + def make_httpx_client() -> Iterator[httpx.Client]: + setup() + with httpx.Client() as client: + try: + yield client + finally: + teardown() + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class InnerObject(BaseModel): + yo: int + + class State(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + query: str + inner: InnerObject + answer: Optional[str] = None + docs: Annotated[list[str], sorted_add] + client: Annotated[httpx.Client, Context(make_httpx_client)] + + class StateUpdate(BaseModel): + query: Optional[str] = None + answer: Optional[str] = None + docs: Optional[list[str]] = None + + class Input(BaseModel): + query: str + inner: InnerObject + + class Output(BaseModel): + answer: str + docs: list[str] + + def rewrite_query(data: State) -> State: + return {"query": f"query: {data.query}"} + + def analyzer_one(data: State) -> State: + return StateUpdate(query=f"analyzed: {data.query}") + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data.docs)} + + def decider(data: State) -> str: + assert isinstance(data, State) + return "retriever_two" + + workflow = StateGraph(State, input=Input, output=Output) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + 
workflow.add_conditional_edges( + "rewrite_query", decider, {"retriever_two": "retriever_two"} + ) + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + assert app.get_input_schema().model_json_schema() == snapshot + assert app.get_output_schema().model_json_schema() == snapshot + + with pytest.raises(ValidationError), assert_ctx_once(): + app.invoke({"query": {}}) + + with assert_ctx_once(): + assert app.invoke({"query": "what is weather in sf", "inner": {"yo": 1}}) == { + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + with assert_ctx_once(): + assert [ + *app.stream({"query": "what is weather in sf", "inner": {"yo": 1}}) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + with assert_ctx_once(): + assert [ + c + for c in app_w_interrupt.stream( + {"query": "what is weather in sf", "inner": {"yo": 1}}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + with assert_ctx_once(): + assert [c for c in app_w_interrupt.stream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + with assert_ctx_once(): + assert app_w_interrupt.update_state( + config, {"docs": ["doc5"]}, as_node="rewrite_query" + ) == { + 
"configurable": { + "thread_id": "1", + "checkpoint_id": AnyStr(), + "checkpoint_ns": "", + } + } + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_in_one_fan_out_state_graph_waiting_edge_plus_regular( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer: BaseCheckpointSaver = request.getfixturevalue( + f"checkpointer_{checkpointer_name}" + ) + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def analyzer_one(data: State) -> State: + time.sleep(0.1) + return {"query": f'analyzed: {data["query"]}'} + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.2) + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + # silly edge, to make sure having been triggered before doesn't break + # semantics of named barrier (== waiting edges) + workflow.add_edge("rewrite_query", "qa") + + app = workflow.compile() + + assert 
app.invoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [*app.stream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"qa": {"answer": ""}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"qa": {"answer": ""}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + assert [c for c in app_w_interrupt.stream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + +def test_in_one_fan_out_state_graph_waiting_edge_multiple() -> None: + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def analyzer_one(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + 
def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + def decider(data: State) -> None: + return None + + def decider_cond(data: State) -> str: + if data["query"].count("analyzed") > 1: + return "qa" + else: + return "rewrite_query" + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("decider", decider) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge(["retriever_one", "retriever_two"], "decider") + workflow.add_conditional_edges("decider", decider_cond) + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert app.invoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: analyzed: query: what is weather in sf", + "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4", + "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"], + } + + assert [*app.stream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"decider": None}, + {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}}, + { + "analyzer_one": { + "query": "analyzed: query: analyzed: query: what is weather in sf" + } + }, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"decider": None}, + {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}}, + ] + + +def test_callable_in_conditional_edges_with_no_path_map() -> None: + 
class State(TypedDict, total=False): + query: str + + def rewrite(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def analyze(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + class ChooseAnalyzer: + def __call__(self, data: State) -> str: + return "analyzer" + + workflow = StateGraph(State) + workflow.add_node("rewriter", rewrite) + workflow.add_node("analyzer", analyze) + workflow.add_conditional_edges("rewriter", ChooseAnalyzer()) + workflow.set_entry_point("rewriter") + app = workflow.compile() + + assert app.invoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: what is weather in sf", + } + + +def test_function_in_conditional_edges_with_no_path_map() -> None: + class State(TypedDict, total=False): + query: str + + def rewrite(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def analyze(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + def choose_analyzer(data: State) -> str: + return "analyzer" + + workflow = StateGraph(State) + workflow.add_node("rewriter", rewrite) + workflow.add_node("analyzer", analyze) + workflow.add_conditional_edges("rewriter", choose_analyzer) + workflow.set_entry_point("rewriter") + app = workflow.compile() + + assert app.invoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: what is weather in sf", + } + + +def test_in_one_fan_out_state_graph_waiting_edge_multiple_cond_edge() -> None: + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + def retriever_picker(data: State) -> list[str]: + return ["analyzer_one", "retriever_two"] 
+ + def analyzer_one(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + def retriever_two(data: State) -> State: + time.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + def decider(data: State) -> None: + return None + + def decider_cond(data: State) -> str: + if data["query"].count("analyzed") > 1: + return "qa" + else: + return "rewrite_query" + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("decider", decider) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_conditional_edges("rewrite_query", retriever_picker) + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_edge(["retriever_one", "retriever_two"], "decider") + workflow.add_conditional_edges("decider", decider_cond) + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert app.invoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: analyzed: query: what is weather in sf", + "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4", + "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"], + } + + assert [*app.stream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"decider": None}, + {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}}, + { + "analyzer_one": { + "query": "analyzed: query: analyzed: query: what is weather in sf" + } + }, + {"retriever_two": 
{"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"decider": None}, + {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}}, + ] + + +def test_simple_multi_edge(snapshot: SnapshotAssertion) -> None: + class State(TypedDict): + my_key: Annotated[str, operator.add] + + def up(state: State): + pass + + def side(state: State): + pass + + def other(state: State): + return {"my_key": "_more"} + + def down(state: State): + pass + + graph = StateGraph(State) + + graph.add_node("up", up) + graph.add_node("side", side) + graph.add_node("other", other) + graph.add_node("down", down) + + graph.set_entry_point("up") + graph.add_edge("up", "side") + graph.add_edge("up", "other") + graph.add_edge(["up", "side"], "down") + graph.set_finish_point("down") + + app = graph.compile() + + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + assert app.invoke({"my_key": "my_value"}) == {"my_key": "my_value_more"} + assert [*app.stream({"my_key": "my_value"})] in ( + [ + {"up": None}, + {"side": None}, + {"other": {"my_key": "_more"}}, + {"down": None}, + ], + [ + {"up": None}, + {"other": {"my_key": "_more"}}, + {"side": None}, + {"down": None}, + ], + ) + + +def test_nested_graph_xray(snapshot: SnapshotAssertion) -> None: + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + def logic(state: State): + pass + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two_slow", logic) + tool_two_graph.add_node("tool_two_fast", logic) + tool_two_graph.set_conditional_entry_point( + lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", + then=END, + ) + tool_two = tool_two_graph.compile() + + graph = StateGraph(State) + graph.add_node("tool_one", logic) + graph.add_node("tool_two", tool_two) + graph.add_node("tool_three", logic) + graph.set_conditional_entry_point(lambda s: "tool_one", then=END) + app = graph.compile() + + assert app.get_graph(xray=True).to_json() == snapshot + 
assert app.get_graph(xray=True).draw_mermaid() == snapshot + + +def test_nested_graph(snapshot: SnapshotAssertion) -> None: + def never_called_fn(state: Any): + assert 0, "This function should never be called" + + never_called = RunnableLambda(never_called_fn) + + class InnerState(TypedDict): + my_key: str + my_other_key: str + + def up(state: InnerState): + return {"my_key": state["my_key"] + " there", "my_other_key": state["my_key"]} + + inner = StateGraph(InnerState) + inner.add_node("up", up) + inner.set_entry_point("up") + inner.set_finish_point("up") + + class State(TypedDict): + my_key: str + never_called: Any + + def side(state: State): + return {"my_key": state["my_key"] + " and back again"} + + graph = StateGraph(State) + graph.add_node("inner", inner.compile()) + graph.add_node("side", side) + graph.set_entry_point("inner") + graph.add_edge("inner", "side") + graph.set_finish_point("side") + + app = graph.compile() + + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + assert app.get_graph(xray=True).draw_mermaid() == snapshot + assert app.invoke( + {"my_key": "my value", "never_called": never_called}, debug=True + ) == { + "my_key": "my value there and back again", + "never_called": never_called, + } + assert [*app.stream({"my_key": "my value", "never_called": never_called})] == [ + {"inner": {"my_key": "my value there"}}, + {"side": {"my_key": "my value there and back again"}}, + ] + assert [ + *app.stream( + {"my_key": "my value", "never_called": never_called}, stream_mode="values" + ) + ] == [ + { + "my_key": "my value", + "never_called": never_called, + }, + { + "my_key": "my value there", + "never_called": never_called, + }, + { + "my_key": "my value there and back again", + "never_called": never_called, + }, + ] + + chain = app | RunnablePassthrough() + + assert chain.invoke({"my_key": "my value", "never_called": never_called}) == { + "my_key": "my value there and back again", + "never_called": never_called, + } + assert 
[*chain.stream({"my_key": "my value", "never_called": never_called})] == [ + {"inner": {"my_key": "my value there"}}, + {"side": {"my_key": "my value there and back again"}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_stream_subgraphs_during_execution( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class InnerState(TypedDict): + my_key: Annotated[str, operator.add] + my_other_key: str + + def inner_1(state: InnerState): + return {"my_key": "got here", "my_other_key": state["my_key"]} + + def inner_2(state: InnerState): + time.sleep(0.5) + return { + "my_key": " and there", + "my_other_key": state["my_key"], + } + + inner = StateGraph(InnerState) + inner.add_node("inner_1", inner_1) + inner.add_node("inner_2", inner_2) + inner.add_edge("inner_1", "inner_2") + inner.set_entry_point("inner_1") + inner.set_finish_point("inner_2") + + class State(TypedDict): + my_key: Annotated[str, operator.add] + + def outer_1(state: State): + time.sleep(0.2) + return {"my_key": " and parallel"} + + def outer_2(state: State): + return {"my_key": " and back again"} + + graph = StateGraph(State) + graph.add_node("inner", inner.compile()) + graph.add_node("outer_1", outer_1) + graph.add_node("outer_2", outer_2) + + graph.add_edge(START, "inner") + graph.add_edge(START, "outer_1") + graph.add_edge(["inner", "outer_1"], "outer_2") + graph.add_edge("outer_2", END) + + app = graph.compile(checkpointer=checkpointer) + + start = time.perf_counter() + chunks: list[tuple[float, Any]] = [] + config = {"configurable": {"thread_id": "2"}} + for c in app.stream({"my_key": ""}, config, subgraphs=True): + chunks.append((round(time.perf_counter() - start, 1), c)) + for idx in range(len(chunks)): + elapsed, c = chunks[idx] + chunks[idx] = (round(elapsed - chunks[0][0], 1), c) + + assert chunks == [ + # arrives before "inner" finishes + ( + 
FloatBetween(0.0, 0.1), + ( + (AnyStr("inner:"),), + {"inner_1": {"my_key": "got here", "my_other_key": ""}}, + ), + ), + (FloatBetween(0.2, 0.3), ((), {"outer_1": {"my_key": " and parallel"}})), + ( + FloatBetween(0.5, 0.8), + ( + (AnyStr("inner:"),), + {"inner_2": {"my_key": " and there", "my_other_key": "got here"}}, + ), + ), + (FloatBetween(0.5, 0.8), ((), {"inner": {"my_key": "got here and there"}})), + (FloatBetween(0.5, 0.8), ((), {"outer_2": {"my_key": " and back again"}})), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_stream_buffering_single_node( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class State(TypedDict): + my_key: Annotated[str, operator.add] + + def node(state: State, writer: StreamWriter): + writer("Before sleep") + time.sleep(0.2) + writer("After sleep") + return {"my_key": "got here"} + + builder = StateGraph(State) + builder.add_node("node", node) + builder.add_edge(START, "node") + builder.add_edge("node", END) + graph = builder.compile(checkpointer=checkpointer) + + start = time.perf_counter() + chunks: list[tuple[float, Any]] = [] + config = {"configurable": {"thread_id": "2"}} + for c in graph.stream({"my_key": ""}, config, stream_mode="custom"): + chunks.append((round(time.perf_counter() - start, 1), c)) + + assert chunks == [ + (FloatBetween(0.0, 0.1), "Before sleep"), + (FloatBetween(0.2, 0.3), "After sleep"), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_nested_graph_interrupts_parallel( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class InnerState(TypedDict): + my_key: Annotated[str, operator.add] + my_other_key: str + + def inner_1(state: InnerState): + time.sleep(0.1) + return {"my_key": "got here", "my_other_key": state["my_key"]} + + 
def inner_2(state: InnerState): + return { + "my_key": " and there", + "my_other_key": state["my_key"], + } + + inner = StateGraph(InnerState) + inner.add_node("inner_1", inner_1) + inner.add_node("inner_2", inner_2) + inner.add_edge("inner_1", "inner_2") + inner.set_entry_point("inner_1") + inner.set_finish_point("inner_2") + + class State(TypedDict): + my_key: Annotated[str, operator.add] + + def outer_1(state: State): + return {"my_key": " and parallel"} + + def outer_2(state: State): + return {"my_key": " and back again"} + + graph = StateGraph(State) + graph.add_node("inner", inner.compile(interrupt_before=["inner_2"])) + graph.add_node("outer_1", outer_1) + graph.add_node("outer_2", outer_2) + + graph.add_edge(START, "inner") + graph.add_edge(START, "outer_1") + graph.add_edge(["inner", "outer_1"], "outer_2") + graph.set_finish_point("outer_2") + + app = graph.compile(checkpointer=checkpointer) + + # test invoke w/ nested interrupt + config = {"configurable": {"thread_id": "1"}} + assert app.invoke({"my_key": ""}, config, debug=True) == { + "my_key": " and parallel", + } + + assert app.invoke(None, config, debug=True) == { + "my_key": "got here and there and parallel and back again", + } + + # below combo of assertions is asserting two things + # - outer_1 finishes before inner interrupts (because we see its output in stream, which only happens after node finishes) + # - the writes of outer are persisted in 1st call and used in 2nd call, ie outer isn't called again (because we dont see outer_1 output again in 2nd stream) + # test stream updates w/ nested interrupt + config = {"configurable": {"thread_id": "2"}} + assert [*app.stream({"my_key": ""}, config, subgraphs=True)] == [ + # we got to parallel node first + ((), {"outer_1": {"my_key": " and parallel"}}), + ((AnyStr("inner:"),), {"inner_1": {"my_key": "got here", "my_other_key": ""}}), + ((), {"__interrupt__": ()}), + ] + assert [*app.stream(None, config)] == [ + {"outer_1": {"my_key": " and parallel"}, 
"__metadata__": {"cached": True}}, + {"inner": {"my_key": "got here and there"}}, + {"outer_2": {"my_key": " and back again"}}, + ] + + # test stream values w/ nested interrupt + config = {"configurable": {"thread_id": "3"}} + assert [*app.stream({"my_key": ""}, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": " and parallel"}, + ] + assert [*app.stream(None, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": "got here and there and parallel"}, + {"my_key": "got here and there and parallel and back again"}, + ] + + # test interrupts BEFORE the parallel node + app = graph.compile(checkpointer=checkpointer, interrupt_before=["outer_1"]) + config = {"configurable": {"thread_id": "4"}} + assert [*app.stream({"my_key": ""}, config, stream_mode="values")] == [ + {"my_key": ""} + ] + # while we're waiting for the node w/ interrupt inside to finish + assert [*app.stream(None, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": " and parallel"}, + ] + assert [*app.stream(None, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": "got here and there and parallel"}, + {"my_key": "got here and there and parallel and back again"}, + ] + + # test interrupts AFTER the parallel node + app = graph.compile(checkpointer=checkpointer, interrupt_after=["outer_1"]) + config = {"configurable": {"thread_id": "5"}} + assert [*app.stream({"my_key": ""}, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": " and parallel"}, + ] + assert [*app.stream(None, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": "got here and there and parallel"}, + ] + assert [*app.stream(None, config, stream_mode="values")] == [ + {"my_key": "got here and there and parallel"}, + {"my_key": "got here and there and parallel and back again"}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_doubly_nested_graph_interrupts( + request: pytest.FixtureRequest, checkpointer_name: str +) -> 
None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class State(TypedDict): + my_key: str + + class ChildState(TypedDict): + my_key: str + + class GrandChildState(TypedDict): + my_key: str + + def grandchild_1(state: ChildState): + return {"my_key": state["my_key"] + " here"} + + def grandchild_2(state: ChildState): + return { + "my_key": state["my_key"] + " and there", + } + + grandchild = StateGraph(GrandChildState) + grandchild.add_node("grandchild_1", grandchild_1) + grandchild.add_node("grandchild_2", grandchild_2) + grandchild.add_edge("grandchild_1", "grandchild_2") + grandchild.set_entry_point("grandchild_1") + grandchild.set_finish_point("grandchild_2") + + child = StateGraph(ChildState) + child.add_node( + "child_1", + grandchild.compile(interrupt_before=["grandchild_2"]), + ) + child.set_entry_point("child_1") + child.set_finish_point("child_1") + + def parent_1(state: State): + return {"my_key": "hi " + state["my_key"]} + + def parent_2(state: State): + return {"my_key": state["my_key"] + " and back again"} + + graph = StateGraph(State) + graph.add_node("parent_1", parent_1) + graph.add_node("child", child.compile()) + graph.add_node("parent_2", parent_2) + graph.set_entry_point("parent_1") + graph.add_edge("parent_1", "child") + graph.add_edge("child", "parent_2") + graph.set_finish_point("parent_2") + + app = graph.compile(checkpointer=checkpointer) + + # test invoke w/ nested interrupt + config = {"configurable": {"thread_id": "1"}} + assert app.invoke({"my_key": "my value"}, config, debug=True) == { + "my_key": "hi my value", + } + + assert app.invoke(None, config, debug=True) == { + "my_key": "hi my value here and there and back again", + } + + # test stream updates w/ nested interrupt + nodes: list[str] = [] + config = { + "configurable": {"thread_id": "2", CONFIG_KEY_NODE_FINISHED: nodes.append} + } + assert [*app.stream({"my_key": "my value"}, config)] == [ + {"parent_1": {"my_key": "hi my value"}}, + 
{"__interrupt__": ()}, + ] + assert nodes == ["parent_1", "grandchild_1"] + assert [*app.stream(None, config)] == [ + {"child": {"my_key": "hi my value here and there"}}, + {"parent_2": {"my_key": "hi my value here and there and back again"}}, + ] + assert nodes == [ + "parent_1", + "grandchild_1", + "grandchild_2", + "child_1", + "child", + "parent_2", + ] + + # test stream values w/ nested interrupt + config = {"configurable": {"thread_id": "3"}} + assert [*app.stream({"my_key": "my value"}, config, stream_mode="values")] == [ + {"my_key": "my value"}, + {"my_key": "hi my value"}, + ] + assert [*app.stream(None, config, stream_mode="values")] == [ + {"my_key": "hi my value"}, + {"my_key": "hi my value here and there"}, + {"my_key": "hi my value here and there and back again"}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_nested_graph_state( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class InnerState(TypedDict): + my_key: str + my_other_key: str + + def inner_1(state: InnerState): + return { + "my_key": state["my_key"] + " here", + "my_other_key": state["my_key"], + } + + def inner_2(state: InnerState): + return { + "my_key": state["my_key"] + " and there", + "my_other_key": state["my_key"], + } + + inner = StateGraph(InnerState) + inner.add_node("inner_1", inner_1) + inner.add_node("inner_2", inner_2) + inner.add_edge("inner_1", "inner_2") + inner.set_entry_point("inner_1") + inner.set_finish_point("inner_2") + + class State(TypedDict): + my_key: str + other_parent_key: str + + def outer_1(state: State): + return {"my_key": "hi " + state["my_key"]} + + def outer_2(state: State): + return {"my_key": state["my_key"] + " and back again"} + + graph = StateGraph(State) + graph.add_node("outer_1", outer_1) + graph.add_node( + "inner", + inner.compile(interrupt_before=["inner_2"]), + ) + graph.add_node("outer_2", 
outer_2) + graph.set_entry_point("outer_1") + graph.add_edge("outer_1", "inner") + graph.add_edge("inner", "outer_2") + graph.set_finish_point("outer_2") + + app = graph.compile(checkpointer=checkpointer) + + config = {"configurable": {"thread_id": "1"}} + app.invoke({"my_key": "my value"}, config, debug=True) + # test state w/ nested subgraph state (right after interrupt) + # first get_state without subgraph state + assert app.get_state(config) == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state={"configurable": {"thread_id": "1", "checkpoint_ns": AnyStr()}}, + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # now, get_state with subgraphs state + assert app.get_state(config, subgraphs=True) == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state=StateSnapshot( + values={ + "my_key": "hi my value here", + "my_other_key": "hi my value", + }, + tasks=( + PregelTask( + AnyStr(), + "inner_2", + (PULL, "inner_2"), + ), + ), + next=("inner_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "parents": { + "": AnyStr(), + }, + "source": "loop", + "writes": { + "inner_1": { + "my_key": "hi my value here", + "my_other_key": "hi my value", + } + }, + "step": 1, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + 
"langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + ), + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # get_state_history returns outer graph checkpoints + history = list(app.get_state_history(config)) + assert history == [ + StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + } + }, + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "my value"}, + tasks=( + PregelTask( + AnyStr(), + "outer_1", + (PULL, "outer_1"), + result={"my_key": "hi my value"}, + ), + ), + next=("outer_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + 
"configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={}, + tasks=( + PregelTask( + AnyStr(), + "__start__", + (PULL, "__start__"), + result={"my_key": "my value"}, + ), + ), + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"my_key": "my value"}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + # get_state_history for a subgraph returns its checkpoints + child_history = [*app.get_state_history(history[0].tasks[0].state)] + assert child_history == [ + StateSnapshot( + values={"my_key": "hi my value here", "my_other_key": "hi my value"}, + next=("inner_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": { + "inner_1": { + "my_key": "hi my value here", + "my_other_key": "hi my value", + } + }, + "step": 1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=(PregelTask(AnyStr(), "inner_2", (PULL, "inner_2")),), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + next=("inner_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": 
AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=( + PregelTask( + AnyStr(), + "inner_1", + (PULL, "inner_1"), + result={ + "my_key": "hi my value here", + "my_other_key": "hi my value", + }, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "hi my value"}}, + "step": -1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + AnyStr(), + "__start__", + (PULL, "__start__"), + result={"my_key": "hi my value"}, + ), + ), + ), + ] + + # resume + app.invoke(None, config, debug=True) + # test state w/ nested subgraph state (after resuming from interrupt) + assert app.get_state(config) == StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": 
"loop", + "writes": { + "outer_2": {"my_key": "hi my value here and there and back again"} + }, + "step": 3, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # test full history at the end + actual_history = list(app.get_state_history(config)) + expected_history = [ + StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "outer_2": {"my_key": "hi my value here and there and back again"} + }, + "step": 3, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "hi my value here and there"}, + tasks=( + PregelTask( + AnyStr(), + "outer_2", + (PULL, "outer_2"), + result={"my_key": "hi my value here and there and back again"}, + ), + ), + next=("outer_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"inner": {"my_key": "hi my value here and there"}}, + "step": 2, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state={ + "configurable": {"thread_id": "1", "checkpoint_ns": AnyStr()} + }, + result={"my_key": "hi my value here and there"}, + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, 
+ "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "my value"}, + tasks=( + PregelTask( + AnyStr(), + "outer_1", + (PULL, "outer_1"), + result={"my_key": "hi my value"}, + ), + ), + next=("outer_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={}, + tasks=( + PregelTask( + AnyStr(), + "__start__", + (PULL, "__start__"), + result={"my_key": "my value"}, + ), + ), + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"my_key": "my value"}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + assert actual_history == expected_history + # test looking up parent state by checkpoint ID + for actual_snapshot, expected_snapshot in zip(actual_history, expected_history): + assert app.get_state(actual_snapshot.config) == expected_snapshot + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_doubly_nested_graph_state( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class State(TypedDict): + my_key: str + + class ChildState(TypedDict): + my_key: str + + class GrandChildState(TypedDict): + my_key: str + + def grandchild_1(state: ChildState): + return {"my_key": 
state["my_key"] + " here"} + + def grandchild_2(state: ChildState): + return { + "my_key": state["my_key"] + " and there", + } + + grandchild = StateGraph(GrandChildState) + grandchild.add_node("grandchild_1", grandchild_1) + grandchild.add_node("grandchild_2", grandchild_2) + grandchild.add_edge("grandchild_1", "grandchild_2") + grandchild.set_entry_point("grandchild_1") + grandchild.set_finish_point("grandchild_2") + + child = StateGraph(ChildState) + child.add_node( + "child_1", + grandchild.compile(interrupt_before=["grandchild_2"]), + ) + child.set_entry_point("child_1") + child.set_finish_point("child_1") + + def parent_1(state: State): + return {"my_key": "hi " + state["my_key"]} + + def parent_2(state: State): + return {"my_key": state["my_key"] + " and back again"} + + graph = StateGraph(State) + graph.add_node("parent_1", parent_1) + graph.add_node("child", child.compile()) + graph.add_node("parent_2", parent_2) + graph.set_entry_point("parent_1") + graph.add_edge("parent_1", "child") + graph.add_edge("child", "parent_2") + graph.set_finish_point("parent_2") + + app = graph.compile(checkpointer=checkpointer) + + # test invoke w/ nested interrupt + config = {"configurable": {"thread_id": "1"}} + assert [c for c in app.stream({"my_key": "my value"}, config, subgraphs=True)] == [ + ((), {"parent_1": {"my_key": "hi my value"}}), + ( + (AnyStr("child:"), AnyStr("child_1:")), + {"grandchild_1": {"my_key": "hi my value here"}}, + ), + ((), {"__interrupt__": ()}), + ] + # get state without subgraphs + outer_state = app.get_state(config) + assert outer_state == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child", + (PULL, "child"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child"), + } + }, + ), + ), + next=("child",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + 
"writes": {"parent_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + child_state = app.get_state(outer_state.tasks[0].state) + assert ( + child_state.tasks[0] + == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child_1", + (PULL, "child_1"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + } + }, + ), + ), + next=("child_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {"": AnyStr()}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + } + }, + ).tasks[0] + ) + grandchild_state = app.get_state(child_state.tasks[0].state) + assert grandchild_state == StateSnapshot( + values={"my_key": "hi my value here"}, + tasks=( + PregelTask( + AnyStr(), + "grandchild_2", + (PULL, "grandchild_2"), + ), + ), + next=("grandchild_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "source": "loop", + "writes": {"grandchild_1": {"my_key": "hi my value here"}}, + "step": 1, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [PULL, AnyStr("child_1")], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + 
parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + ) + # get state with subgraphs + assert app.get_state(config, subgraphs=True) == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child", + (PULL, "child"), + state=StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child_1", + (PULL, "child_1"), + state=StateSnapshot( + values={"my_key": "hi my value here"}, + tasks=( + PregelTask( + AnyStr(), + "grandchild_2", + (PULL, "grandchild_2"), + ), + ), + next=("grandchild_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr( + re.compile(r"child:.+|child1:") + ): AnyStr(), + } + ), + } + }, + metadata={ + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "source": "loop", + "writes": { + "grandchild_1": {"my_key": "hi my value here"} + }, + "step": 1, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr( + re.compile(r"child:.+|child1:") + ): AnyStr(), + } + ), + } + }, + ), + ), + ), + next=("child_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), 
AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "parents": {"": AnyStr()}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + "langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + ), + ), + ), + next=("child",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"parent_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # # resume + assert [c for c in app.stream(None, config, subgraphs=True)] == [ + ( + (AnyStr("child:"), AnyStr("child_1:")), + {"grandchild_2": {"my_key": "hi my value here and there"}}, + ), + ((AnyStr("child:"),), {"child_1": {"my_key": "hi my value here and there"}}), + ((), {"child": {"my_key": "hi my value here and there"}}), + ((), {"parent_2": {"my_key": "hi my value here and there and back again"}}), + ] + # get state with and without subgraphs + assert ( + app.get_state(config) + == app.get_state(config, subgraphs=True) + == StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "parent_2": {"my_key": "hi my value here and there and back again"} + }, + "step": 3, + "thread_id": "1", + }, + 
created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + ) + # get outer graph history + outer_history = list(app.get_state_history(config)) + assert outer_history == [ + StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "parent_2": {"my_key": "hi my value here and there and back again"} + }, + "step": 3, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "hi my value here and there"}, + next=("parent_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"child": {"my_key": "hi my value here and there"}}, + "step": 2, + "parents": {}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="parent_2", + path=(PULL, "parent_2"), + result={"my_key": "hi my value here and there and back again"}, + ), + ), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child", + (PULL, "child"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child"), + } + }, + result={"my_key": "hi my value here and there"}, + ), + ), + next=("child",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"parent_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": 
"1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "my value"}, + next=("parent_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": {}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="parent_1", + path=(PULL, "parent_1"), + result={"my_key": "hi my value"}, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "my value"}}, + "step": -1, + "parents": {}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=(PULL, "__start__"), + result={"my_key": "my value"}, + ), + ), + ), + ] + # get child graph history + child_history = list(app.get_state_history(outer_history[2].tasks[0].state)) + assert child_history == [ + StateSnapshot( + values={"my_key": "hi my value here and there"}, + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": {"child_1": {"my_key": "hi my value here and there"}}, + "step": 1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + 
"langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=(), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + next=("child_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + "langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="child_1", + path=(PULL, "child_1"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + } + }, + result={"my_key": "hi my value here and there"}, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "hi my value"}}, + "step": -1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + 
"langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=(PULL, "__start__"), + result={"my_key": "hi my value"}, + ), + ), + ), + ] + # get grandchild graph history + grandchild_history = list(app.get_state_history(child_history[1].tasks[0].state)) + assert grandchild_history == [ + StateSnapshot( + values={"my_key": "hi my value here and there"}, + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": {"grandchild_2": {"my_key": "hi my value here and there"}}, + "step": 2, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + tasks=(), + ), + StateSnapshot( + values={"my_key": "hi my value here"}, + next=("grandchild_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": {"grandchild_1": {"my_key": "hi my value here"}}, + "step": 1, + "parents": AnyDict( + { + "": AnyStr(), + 
AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="grandchild_2", + path=(PULL, "grandchild_2"), + result={"my_key": "hi my value here and there"}, + ), + ), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + next=("grandchild_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="grandchild_1", + path=(PULL, "grandchild_1"), + result={"my_key": "hi my value here"}, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), 
+ config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "hi my value"}}, + "step": -1, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=(PULL, "__start__"), + result={"my_key": "hi my value"}, + ), + ), + ), + ] + + # replay grandchild checkpoint + assert [ + c for c in app.stream(None, grandchild_history[2].config, subgraphs=True) + ] == [ + ( + (AnyStr("child:"), AnyStr("child_1:")), + {"grandchild_1": {"my_key": "hi my value here"}}, + ), + ((), {"__interrupt__": ()}), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_send_to_nested_graphs( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + class OverallState(TypedDict): + subjects: list[str] + jokes: Annotated[list[str], operator.add] + + def continue_to_jokes(state: OverallState): + return [Send("generate_joke", {"subject": s}) for s in state["subjects"]] + + class JokeState(TypedDict): + subject: str + + def edit(state: JokeState): + subject = state["subject"] + return {"subject": f"{subject} - hohoho"} + + # subgraph + subgraph = StateGraph(JokeState, output=OverallState) + subgraph.add_node("edit", edit) + subgraph.add_node( + "generate", lambda state: {"jokes": [f"Joke about {state['subject']}"]} 
+ ) + subgraph.set_entry_point("edit") + subgraph.add_edge("edit", "generate") + subgraph.set_finish_point("generate") + + # parent graph + builder = StateGraph(OverallState) + builder.add_node( + "generate_joke", + subgraph.compile(interrupt_before=["generate"]), + ) + builder.add_conditional_edges(START, continue_to_jokes) + builder.add_edge("generate_joke", END) + + graph = builder.compile(checkpointer=checkpointer) + config = {"configurable": {"thread_id": "1"}} + tracer = FakeTracer() + + # invoke and pause at nested interrupt + assert graph.invoke( + {"subjects": ["cats", "dogs"]}, config={**config, "callbacks": [tracer]} + ) == { + "subjects": ["cats", "dogs"], + "jokes": [], + } + assert len(tracer.runs) == 1, "Should produce exactly 1 root run" + + # check state + outer_state = graph.get_state(config) + + if not FF_SEND_V2: + # update state of dogs joke graph + graph.update_state(outer_state.tasks[1].state, {"subject": "turtles - hohoho"}) + + # continue past interrupt + assert sorted( + graph.stream(None, config=config), + key=lambda d: d["generate_joke"]["jokes"][0], + ) == [ + {"generate_joke": {"jokes": ["Joke about cats - hohoho"]}}, + {"generate_joke": {"jokes": ["Joke about turtles - hohoho"]}}, + ] + return + + assert outer_state == StateSnapshot( + values={"subjects": ["cats", "dogs"], "jokes": []}, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=("__pregel_pull", "__start__"), + error=None, + interrupts=(), + state=None, + result={"subjects": ["cats", "dogs"]}, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + ), + ), + next=("generate_joke", "generate_joke"), + 
config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"subjects": ["cats", "dogs"]}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ) + # check state of each of the inner tasks + assert graph.get_state(outer_state.tasks[1].state) == StateSnapshot( + values={"subject": "cats - hohoho", "jokes": []}, + next=("generate",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("generate_joke:"): AnyStr(), + } + ), + } + }, + metadata={ + "step": 1, + "source": "loop", + "writes": {"edit": None}, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + "langgraph_checkpoint_ns": AnyStr("generate_joke:"), + "langgraph_node": "generate_joke", + "langgraph_path": [PUSH, ["__pregel_pull", "__start__"], 1, AnyStr()], + "langgraph_step": 0, + "langgraph_triggers": [PUSH], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("generate_joke:"): AnyStr(), + } + ), + } + }, + tasks=(PregelTask(id=AnyStr(""), name="generate", path=(PULL, "generate")),), + ) + assert graph.get_state(outer_state.tasks[2].state) == StateSnapshot( + values={"subject": "dogs - hohoho", "jokes": []}, + next=("generate",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("generate_joke:"): AnyStr(), + } + ), + } + }, + metadata={ + "step": 1, + "source": "loop", + "writes": {"edit": None}, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": 
AnyStr("generate_joke:"), + "langgraph_checkpoint_ns": AnyStr("generate_joke:"), + "langgraph_node": "generate_joke", + "langgraph_path": [PUSH, ["__pregel_pull", "__start__"], 2, AnyStr()], + "langgraph_step": 0, + "langgraph_triggers": [PUSH], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("generate_joke:"): AnyStr(), + } + ), + } + }, + tasks=(PregelTask(id=AnyStr(""), name="generate", path=(PULL, "generate")),), + ) + # update state of dogs joke graph + graph.update_state( + outer_state.tasks[2 if FF_SEND_V2 else 1].state, {"subject": "turtles - hohoho"} + ) + + # continue past interrupt + assert sorted( + graph.stream(None, config=config), key=lambda d: d["generate_joke"]["jokes"][0] + ) == [ + {"generate_joke": {"jokes": ["Joke about cats - hohoho"]}}, + {"generate_joke": {"jokes": ["Joke about turtles - hohoho"]}}, + ] + + actual_snapshot = graph.get_state(config) + expected_snapshot = StateSnapshot( + values={ + "subjects": ["cats", "dogs"], + "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"], + }, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "generate_joke": [ + {"jokes": ["Joke about cats - hohoho"]}, + {"jokes": ["Joke about turtles - hohoho"]}, + ] + }, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + assert actual_snapshot == expected_snapshot + + # test full history + actual_history = list(graph.get_state_history(config)) + + # get subgraph node state for expected history + expected_history = [ + StateSnapshot( + values={ + "subjects": ["cats", "dogs"], + "jokes": ["Joke about 
cats - hohoho", "Joke about turtles - hohoho"], + }, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "generate_joke": [ + {"jokes": ["Joke about cats - hohoho"]}, + {"jokes": ["Joke about turtles - hohoho"]}, + ] + }, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"jokes": []}, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=("__pregel_pull", "__start__"), + error=None, + interrupts=(), + state=None, + result={"subjects": ["cats", "dogs"]}, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + result={"jokes": ["Joke about cats - hohoho"]}, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + result={"jokes": ["Joke about turtles - hohoho"]}, + ), + ), + next=("__start__", "generate_joke", "generate_joke"), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"subjects": ["cats", "dogs"]}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + assert actual_history == expected_history + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_weather_subgraph( + request: pytest.FixtureRequest, checkpointer_name: str, snapshot: SnapshotAssertion +) -> None: + from langchain_core.language_models.fake_chat_models import ( + 
FakeMessagesListChatModel, + ) + from langchain_core.messages import AIMessage, ToolCall + from langchain_core.tools import tool + + from langgraph.graph import MessagesState + + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + + # setup subgraph + + @tool + def get_weather(city: str): + """Get the weather for a specific city""" + return f"I'ts sunny in {city}!" + + weather_model = FakeMessagesListChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + ToolCall( + id="tool_call123", + name="get_weather", + args={"city": "San Francisco"}, + ) + ], + ) + ] + ) + + class SubGraphState(MessagesState): + city: str + + def model_node(state: SubGraphState, writer: StreamWriter): + writer(" very") + result = weather_model.invoke(state["messages"]) + return {"city": cast(AIMessage, result).tool_calls[0]["args"]["city"]} + + def weather_node(state: SubGraphState, writer: StreamWriter): + writer(" good") + result = get_weather.invoke({"city": state["city"]}) + return {"messages": [{"role": "assistant", "content": result}]} + + subgraph = StateGraph(SubGraphState) + subgraph.add_node(model_node) + subgraph.add_node(weather_node) + subgraph.add_edge(START, "model_node") + subgraph.add_edge("model_node", "weather_node") + subgraph.add_edge("weather_node", END) + subgraph = subgraph.compile(interrupt_before=["weather_node"]) + + # setup main graph + + class RouterState(MessagesState): + route: Literal["weather", "other"] + + router_model = FakeMessagesListChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + ToolCall( + id="tool_call123", + name="router", + args={"dest": "weather"}, + ) + ], + ) + ] + ) + + def router_node(state: RouterState, writer: StreamWriter): + writer("I'm") + system_message = "Classify the incoming query as either about weather or not." 
+ messages = [{"role": "system", "content": system_message}] + state["messages"] + route = router_model.invoke(messages) + return {"route": cast(AIMessage, route).tool_calls[0]["args"]["dest"]} + + def normal_llm_node(state: RouterState): + return {"messages": [AIMessage("Hello!")]} + + def route_after_prediction(state: RouterState): + if state["route"] == "weather": + return "weather_graph" + else: + return "normal_llm_node" + + def weather_graph(state: RouterState): + return subgraph.invoke(state) + + graph = StateGraph(RouterState) + graph.add_node(router_node) + graph.add_node(normal_llm_node) + graph.add_node("weather_graph", weather_graph) + graph.add_edge(START, "router_node") + graph.add_conditional_edges("router_node", route_after_prediction) + graph.add_edge("normal_llm_node", END) + graph.add_edge("weather_graph", END) + graph = graph.compile(checkpointer=checkpointer) + + assert graph.get_graph(xray=1).draw_mermaid() == snapshot + + config = {"configurable": {"thread_id": "1"}} + thread2 = {"configurable": {"thread_id": "2"}} + inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]} + + # run with custom output + assert [c for c in graph.stream(inputs, thread2, stream_mode="custom")] == [ + "I'm", + " very", + ] + assert [c for c in graph.stream(None, thread2, stream_mode="custom")] == [ + " good", + ] + + # run until interrupt + assert [ + c + for c in graph.stream( + inputs, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ((), {"router_node": {"route": "weather"}}), + ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}), + ((), {"__interrupt__": ()}), + ] + + # check current state + state = graph.get_state(config) + assert state == StateSnapshot( + values={ + "messages": [_AnyIdHumanMessage(content="what's the weather in sf")], + "route": "weather", + }, + next=("weather_graph",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + 
}, + metadata={ + "source": "loop", + "writes": {"router_node": {"route": "weather"}}, + "step": 1, + "parents": {}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_graph", + path=(PULL, "weather_graph"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("weather_graph:"), + } + }, + ), + ), + ) + + # update + graph.update_state(state.tasks[0].state, {"city": "la"}) + + # run after update + assert [ + c + for c in graph.stream( + None, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ( + (AnyStr("weather_graph:"),), + { + "weather_node": { + "messages": [{"role": "assistant", "content": "I'ts sunny in la!"}] + } + }, + ), + ( + (), + { + "weather_graph": { + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf"), + _AnyIdAIMessage(content="I'ts sunny in la!"), + ] + } + }, + ), + ] + + # try updating acting as weather node + config = {"configurable": {"thread_id": "14"}} + inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]} + assert [ + c + for c in graph.stream( + inputs, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ((), {"router_node": {"route": "weather"}}), + ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}), + ((), {"__interrupt__": ()}), + ] + state = graph.get_state(config, subgraphs=True) + assert state == StateSnapshot( + values={ + "messages": [_AnyIdHumanMessage(content="what's the weather in sf")], + "route": "weather", + }, + next=("weather_graph",), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"router_node": {"route": "weather"}}, + "step": 1, + "parents": {}, + "thread_id": "14", + }, + created_at=AnyStr(), + parent_config={ + 
"configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_graph", + path=(PULL, "weather_graph"), + state=StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf") + ], + "city": "San Francisco", + }, + next=("weather_node",), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": {"model_node": {"city": "San Francisco"}}, + "step": 1, + "parents": {"": AnyStr()}, + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "langgraph_node": "weather_graph", + "langgraph_path": [PULL, "weather_graph"], + "langgraph_step": 2, + "langgraph_triggers": [ + "branch:router_node:route_after_prediction:weather_graph" + ], + "langgraph_checkpoint_ns": AnyStr("weather_graph:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_node", + path=(PULL, "weather_node"), + ), + ), + ), + ), + ), + ) + graph.update_state( + state.tasks[0].state.config, + {"messages": [{"role": "assistant", "content": "rainy"}]}, + as_node="weather_node", + ) + state = graph.get_state(config, subgraphs=True) + assert state == StateSnapshot( + values={ + "messages": [_AnyIdHumanMessage(content="what's the weather in sf")], + "route": "weather", + }, + next=("weather_graph",), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"router_node": {"route": "weather"}}, + 
"step": 1, + "parents": {}, + "thread_id": "14", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_graph", + path=(PULL, "weather_graph"), + state=StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf"), + _AnyIdAIMessage(content="rainy"), + ], + "city": "San Francisco", + }, + next=(), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + metadata={ + "step": 2, + "source": "update", + "writes": { + "weather_node": { + "messages": [{"role": "assistant", "content": "rainy"}] + } + }, + "parents": {"": AnyStr()}, + "thread_id": "14", + "checkpoint_id": AnyStr(), + "checkpoint_ns": AnyStr("weather_graph:"), + "langgraph_node": "weather_graph", + "langgraph_path": [PULL, "weather_graph"], + "langgraph_step": 2, + "langgraph_triggers": [ + "branch:router_node:route_after_prediction:weather_graph" + ], + "langgraph_checkpoint_ns": AnyStr("weather_graph:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + tasks=(), + ), + ), + ), + ) + assert [ + c + for c in graph.stream( + None, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ( + (), + { + "weather_graph": { + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf"), + _AnyIdAIMessage(content="rainy"), + ] + } + }, + ), + ] + + +def test_repeat_condition(snapshot: SnapshotAssertion) -> None: + class AgentState(TypedDict): + hello: str + + def router(state: AgentState) -> str: + return "hmm" + + workflow 
= StateGraph(AgentState) + workflow.add_node("Researcher", lambda x: x) + workflow.add_node("Chart Generator", lambda x: x) + workflow.add_node("Call Tool", lambda x: x) + workflow.add_conditional_edges( + "Researcher", + router, + { + "redo": "Researcher", + "continue": "Chart Generator", + "call_tool": "Call Tool", + "end": END, + }, + ) + workflow.add_conditional_edges( + "Chart Generator", + router, + {"continue": "Researcher", "call_tool": "Call Tool", "end": END}, + ) + workflow.add_conditional_edges( + "Call Tool", + # Each agent node updates the 'sender' field + # the tool calling node does not, meaning + # this edge will route back to the original agent + # who invoked the tool + lambda x: x["sender"], + { + "Researcher": "Researcher", + "Chart Generator": "Chart Generator", + }, + ) + workflow.set_entry_point("Researcher") + + app = workflow.compile() + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + + +def test_checkpoint_metadata() -> None: + """This test verifies that a run's configurable fields are merged with the + previous checkpoint config for each step in the run. 
+ """ + # set up test + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import AIMessage, AnyMessage + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.tools import tool + + # graph state + class BaseState(TypedDict): + messages: Annotated[list[AnyMessage], add_messages] + + # initialize graph nodes + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a nice assistant."), + ("placeholder", "{messages}"), + ] + ) + + model = FakeMessagesListChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + AIMessage(content="answer"), + ] + ) + + @traceable(run_type="llm") + def agent(state: BaseState) -> BaseState: + formatted = prompt.invoke(state) + response = model.invoke(formatted) + return {"messages": response, "usage_metadata": {"total_tokens": 123}} + + def should_continue(data: BaseState) -> str: + # Logic to decide whether to continue in the loop or exit + if not data["messages"][-1].tool_calls: + return "exit" + else: + return "continue" + + # define graphs w/ and w/o interrupt + workflow = StateGraph(BaseState) + workflow.add_node("agent", agent) + workflow.add_node("tools", ToolNode(tools)) + workflow.set_entry_point("agent") + workflow.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + workflow.add_edge("tools", "agent") + + # graph w/o interrupt + checkpointer_1 = MemorySaverAssertCheckpointMetadata() + app = workflow.compile(checkpointer=checkpointer_1) + + # graph w/ interrupt + checkpointer_2 = MemorySaverAssertCheckpointMetadata() + app_w_interrupt = workflow.compile( + checkpointer=checkpointer_2, interrupt_before=["tools"] + ) + + # assertions 
+ + # invoke graph w/o interrupt + assert app.invoke( + {"messages": ["what is weather in sf"]}, + { + "configurable": { + "thread_id": "1", + "test_config_1": "foo", + "test_config_2": "bar", + }, + }, + ) == { + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "query"}, + "id": "tool_call123", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + _AnyIdAIMessage(content="answer"), + ] + } + + config = {"configurable": {"thread_id": "1"}} + + # assert that checkpoint metadata contains the run's configurable fields + chkpnt_metadata_1 = checkpointer_1.get_tuple(config).metadata + assert chkpnt_metadata_1["thread_id"] == "1" + assert chkpnt_metadata_1["test_config_1"] == "foo" + assert chkpnt_metadata_1["test_config_2"] == "bar" + + # Verify that all checkpoint metadata have the expected keys. This check + # is needed because a run may have an arbitrary number of steps depending + # on how the graph is constructed. 
+ chkpnt_tuples_1 = checkpointer_1.list(config) + for chkpnt_tuple in chkpnt_tuples_1: + assert chkpnt_tuple.metadata["thread_id"] == "1" + assert chkpnt_tuple.metadata["test_config_1"] == "foo" + assert chkpnt_tuple.metadata["test_config_2"] == "bar" + + # invoke graph, but interrupt before tool call + app_w_interrupt.invoke( + {"messages": ["what is weather in sf"]}, + { + "configurable": { + "thread_id": "2", + "test_config_3": "foo", + "test_config_4": "bar", + }, + }, + ) + + config = {"configurable": {"thread_id": "2"}} + + # assert that checkpoint metadata contains the run's configurable fields + chkpnt_metadata_2 = checkpointer_2.get_tuple(config).metadata + assert chkpnt_metadata_2["thread_id"] == "2" + assert chkpnt_metadata_2["test_config_3"] == "foo" + assert chkpnt_metadata_2["test_config_4"] == "bar" + + # resume graph execution + app_w_interrupt.invoke( + input=None, + config={ + "configurable": { + "thread_id": "2", + "test_config_3": "foo", + "test_config_4": "bar", + } + }, + ) + + # assert that checkpoint metadata contains the run's configurable fields + chkpnt_metadata_3 = checkpointer_2.get_tuple(config).metadata + assert chkpnt_metadata_3["thread_id"] == "2" + assert chkpnt_metadata_3["test_config_3"] == "foo" + assert chkpnt_metadata_3["test_config_4"] == "bar" + + # Verify that all checkpoint metadata have the expected keys. This check + # is needed because a run may have an arbitrary number of steps depending + # on how the graph is constructed. 
+ chkpnt_tuples_2 = checkpointer_2.list(config) + for chkpnt_tuple in chkpnt_tuples_2: + assert chkpnt_tuple.metadata["thread_id"] == "2" + assert chkpnt_tuple.metadata["test_config_3"] == "foo" + assert chkpnt_tuple.metadata["test_config_4"] == "bar" + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_remove_message_via_state_update( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage + + workflow = MessageGraph() + workflow.add_node( + "chatbot", + lambda state: [ + AIMessage( + content="Hello! How can I help you", + ) + ], + ) + + workflow.set_entry_point("chatbot") + workflow.add_edge("chatbot", END) + + checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name) + app = workflow.compile(checkpointer=checkpointer) + config = {"configurable": {"thread_id": "1"}} + output = app.invoke([HumanMessage(content="Hi")], config=config) + app.update_state(config, values=[RemoveMessage(id=output[-1].id)]) + + updated_state = app.get_state(config) + + assert len(updated_state.values) == 1 + assert updated_state.values[-1].content == "Hi" + + +def test_remove_message_from_node(): + from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage + + workflow = MessageGraph() + workflow.add_node( + "chatbot", + lambda state: [ + AIMessage( + content="Hello!", + ), + AIMessage( + content="How can I help you?", + ), + ], + ) + workflow.add_node("delete_messages", lambda state: [RemoveMessage(id=state[-2].id)]) + workflow.set_entry_point("chatbot") + workflow.add_edge("chatbot", "delete_messages") + workflow.add_edge("delete_messages", END) + + app = workflow.compile() + output = app.invoke([HumanMessage(content="Hi")]) + assert len(output) == 2 + assert output[-1].content == "How can I help you?" 
+ + +def test_xray_lance(snapshot: SnapshotAssertion): + from langchain_core.messages import AnyMessage, HumanMessage + from pydantic import BaseModel, Field + + class Analyst(BaseModel): + affiliation: str = Field( + description="Primary affiliation of the investment analyst.", + ) + name: str = Field( + description="Name of the investment analyst.", + pattern=r"^[a-zA-Z0-9_-]{1,64}$", + ) + role: str = Field( + description="Role of the investment analyst in the context of the topic.", + ) + description: str = Field( + description="Description of the investment analyst focus, concerns, and motives.", + ) + + @property + def persona(self) -> str: + return f"Name: {self.name}\nRole: {self.role}\nAffiliation: {self.affiliation}\nDescription: {self.description}\n" + + class Perspectives(BaseModel): + analysts: List[Analyst] = Field( + description="Comprehensive list of investment analysts with their roles and affiliations.", + ) + + class Section(BaseModel): + section_title: str = Field(..., title="Title of the section") + context: str = Field( + ..., title="Provide a clear summary of the focus area that you researched." + ) + findings: str = Field( + ..., + title="Give a clear and detailed overview of your findings based upon the expert interview.", + ) + thesis: str = Field( + ..., + title="Give a clear and specific investment thesis based upon these findings.", + ) + + class InterviewState(TypedDict): + messages: Annotated[List[AnyMessage], add_messages] + analyst: Analyst + section: Section + + class ResearchGraphState(TypedDict): + analysts: List[Analyst] + topic: str + max_analysts: int + sections: List[Section] + interviews: Annotated[list, operator.add] + + # Conditional edge + def route_messages(state): + return "ask_question" + + def generate_question(state): + return ... + + def generate_answer(state): + return ... 
+ + # Add nodes and edges + interview_builder = StateGraph(InterviewState) + interview_builder.add_node("ask_question", generate_question) + interview_builder.add_node("answer_question", generate_answer) + + # Flow + interview_builder.add_edge(START, "ask_question") + interview_builder.add_edge("ask_question", "answer_question") + interview_builder.add_conditional_edges("answer_question", route_messages) + + # Set up memory + memory = MemorySaver() + + # Interview + interview_graph = interview_builder.compile(checkpointer=memory).with_config( + run_name="Conduct Interviews" + ) + + # View + assert interview_graph.get_graph().to_json() == snapshot + + def run_all_interviews(state: ResearchGraphState): + """Edge to run the interview sub-graph using Send""" + return [ + Send( + "conduct_interview", + { + "analyst": Analyst(), + "messages": [ + HumanMessage( + content="So you said you were writing an article on ...?" + ) + ], + }, + ) + for s in state["analysts"] + ] + + def generate_sections(state: ResearchGraphState): + return ... + + def generate_analysts(state: ResearchGraphState): + return ... 
+ + builder = StateGraph(ResearchGraphState) + builder.add_node("generate_analysts", generate_analysts) + builder.add_node("conduct_interview", interview_builder.compile()) + builder.add_node("generate_sections", generate_sections) + + builder.add_edge(START, "generate_analysts") + builder.add_conditional_edges( + "generate_analysts", run_all_interviews, ["conduct_interview"] + ) + builder.add_edge("conduct_interview", "generate_sections") + builder.add_edge("generate_sections", END) + + graph = builder.compile() + + # View + assert graph.get_graph().to_json() == snapshot + assert graph.get_graph(xray=1).to_json() == snapshot + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +def test_channel_values(request: pytest.FixtureRequest, checkpointer_name: str) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + + config = {"configurable": {"thread_id": "1"}} + chain = Channel.subscribe_to("input") | Channel.write_to("output") + app = Pregel( + nodes={ + "one": chain, + }, + channels={ + "ephemeral": EphemeralValue(Any), + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels=["input", "ephemeral"], + output_channels="output", + checkpointer=checkpointer, + ) + app.invoke({"input": 1, "ephemeral": "meow"}, config) + assert checkpointer.get(config)["channel_values"] == {"input": 1, "output": 1} + + +def test_xray_issue(snapshot: SnapshotAssertion) -> None: + class State(TypedDict): + messages: Annotated[list, add_messages] + + def node(name): + def _node(state: State): + return {"messages": [("human", f"entered {name} node")]} + + return _node + + parent = StateGraph(State) + child = StateGraph(State) + + child.add_node("c_one", node("c_one")) + child.add_node("c_two", node("c_two")) + + child.add_edge("__start__", "c_one") + child.add_edge("c_two", "c_one") + + child.add_conditional_edges( + "c_one", lambda x: str(randrange(0, 2)), {"0": "c_two", "1": "__end__"} + ) + + 
parent.add_node("p_one", node("p_one")) + parent.add_node("p_two", child.compile()) + + parent.add_edge("__start__", "p_one") + parent.add_edge("p_two", "p_one") + + parent.add_conditional_edges( + "p_one", lambda x: str(randrange(0, 2)), {"0": "p_two", "1": "__end__"} + ) + + app = parent.compile() + + assert app.get_graph(xray=True).draw_mermaid() == snapshot + + +def test_xray_bool(snapshot: SnapshotAssertion) -> None: + class State(TypedDict): + messages: Annotated[list, add_messages] + + def node(name): + def _node(state: State): + return {"messages": [("human", f"entered {name} node")]} + + return _node + + grand_parent = StateGraph(State) + + child = StateGraph(State) + + child.add_node("c_one", node("c_one")) + child.add_node("c_two", node("c_two")) + + child.add_edge("__start__", "c_one") + child.add_edge("c_two", "c_one") + + child.add_conditional_edges( + "c_one", lambda x: str(randrange(0, 2)), {"0": "c_two", "1": "__end__"} + ) + + parent = StateGraph(State) + parent.add_node("p_one", node("p_one")) + parent.add_node("p_two", child.compile()) + parent.add_edge("__start__", "p_one") + parent.add_edge("p_two", "p_one") + parent.add_conditional_edges( + "p_one", lambda x: str(randrange(0, 2)), {"0": "p_two", "1": "__end__"} + ) + + grand_parent.add_node("gp_one", node("gp_one")) + grand_parent.add_node("gp_two", parent.compile()) + grand_parent.add_edge("__start__", "gp_one") + grand_parent.add_edge("gp_two", "gp_one") + grand_parent.add_conditional_edges( + "gp_one", lambda x: str(randrange(0, 2)), {"0": "gp_two", "1": "__end__"} + ) + + app = grand_parent.compile() + assert app.get_graph(xray=True).draw_mermaid() == snapshot + + +def test_multiple_sinks_subgraphs(snapshot: SnapshotAssertion) -> None: + class State(TypedDict): + messages: Annotated[list, add_messages] + + subgraph_builder = StateGraph(State) + subgraph_builder.add_node("one", lambda x: x) + subgraph_builder.add_node("two", lambda x: x) + subgraph_builder.add_node("three", lambda x: x) + 
subgraph_builder.add_edge("__start__", "one") + subgraph_builder.add_conditional_edges("one", lambda x: "two", ["two", "three"]) + subgraph = subgraph_builder.compile() + + builder = StateGraph(State) + builder.add_node("uno", lambda x: x) + builder.add_node("dos", lambda x: x) + builder.add_node("subgraph", subgraph) + builder.add_edge("__start__", "uno") + builder.add_conditional_edges("uno", lambda x: "dos", ["dos", "subgraph"]) + + app = builder.compile() + assert app.get_graph(xray=True).draw_mermaid() == snapshot + + +def test_subgraph_retries(): + class State(TypedDict): + count: int + + class ChildState(State): + some_list: Annotated[list, operator.add] + + called_times = 0 + + class RandomError(ValueError): + """This will be retried on.""" + + def parent_node(state: State): + return {"count": state["count"] + 1} + + def child_node_a(state: ChildState): + nonlocal called_times + # We want it to retry only on node_b + # NOT re-compute the whole graph. + assert not called_times + called_times += 1 + return {"some_list": ["val"]} + + def child_node_b(state: ChildState): + raise RandomError("First attempt fails") + + child = StateGraph(ChildState) + child.add_node(child_node_a) + child.add_node(child_node_b) + child.add_edge("__start__", "child_node_a") + child.add_edge("child_node_a", "child_node_b") + + parent = StateGraph(State) + parent.add_node("parent_node", parent_node) + parent.add_node( + "child_graph", + child.compile(), + retry=RetryPolicy( + max_attempts=3, + retry_on=(RandomError,), + backoff_factor=0.0001, + initial_interval=0.0001, + ), + ) + parent.add_edge("parent_node", "child_graph") + parent.set_entry_point("parent_node") + + checkpointer = MemorySaver() + app = parent.compile(checkpointer=checkpointer) + with pytest.raises(RandomError): + app.invoke({"count": 0}, {"configurable": {"thread_id": "foo"}}) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC) +@pytest.mark.parametrize("store_name", ALL_STORES_SYNC) +def 
test_store_injected( + request: pytest.FixtureRequest, checkpointer_name: str, store_name: str +) -> None: + checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}") + the_store = request.getfixturevalue(f"store_{store_name}") + + class State(TypedDict): + count: Annotated[int, operator.add] + + doc_id = str(uuid.uuid4()) + doc = {"some-key": "this-is-a-val"} + + def node(input: State, config: RunnableConfig, store: BaseStore): + assert isinstance(store, BaseStore) + store.put( + ("foo", "bar"), + doc_id, + { + **doc, + "from_thread": config["configurable"]["thread_id"], + "some_val": input["count"], + }, + ) + return {"count": 1} + + builder = StateGraph(State) + builder.add_node("node", node) + builder.add_edge("__start__", "node") + graph = builder.compile(store=the_store, checkpointer=checkpointer) + + thread_1 = str(uuid.uuid4()) + result = graph.invoke({"count": 0}, {"configurable": {"thread_id": thread_1}}) + assert result == {"count": 1} + returned_doc = the_store.get(("foo", "bar"), doc_id).value + assert returned_doc == {**doc, "from_thread": thread_1, "some_val": 0} + assert len(the_store.search(("foo", "bar"))) == 1 + + # Check update on existing thread + result = graph.invoke({"count": 0}, {"configurable": {"thread_id": thread_1}}) + assert result == {"count": 2} + returned_doc = the_store.get(("foo", "bar"), doc_id).value + assert returned_doc == {**doc, "from_thread": thread_1, "some_val": 1} + assert len(the_store.search(("foo", "bar"))) == 1 + + thread_2 = str(uuid.uuid4()) + + result = graph.invoke({"count": 0}, {"configurable": {"thread_id": thread_2}}) + assert result == {"count": 1} + returned_doc = the_store.get(("foo", "bar"), doc_id).value + assert returned_doc == { + **doc, + "from_thread": thread_2, + "some_val": 0, + } # Overwrites the whole doc + assert len(the_store.search(("foo", "bar"))) == 1 # still overwriting the same one + + +def test_enum_node_names(): + class NodeName(str, enum.Enum): + BAZ = "baz" + + class 
State(TypedDict): + foo: str + bar: str + + def baz(state: State): + return {"bar": state["foo"] + "!"} + + graph = StateGraph(State) + graph.add_node(NodeName.BAZ, baz) + graph.add_edge(START, NodeName.BAZ) + graph.add_edge(NodeName.BAZ, END) + graph = graph.compile() + + assert graph.invoke({"foo": "hello"}) == {"foo": "hello", "bar": "hello!"} + + +def test_debug_retry(): + class State(TypedDict): + messages: Annotated[list[str], operator.add] + + def node(name): + def _node(state: State): + return {"messages": [f"entered {name} node"]} + + return _node + + builder = StateGraph(State) + builder.add_node("one", node("one")) + builder.add_node("two", node("two")) + builder.add_edge(START, "one") + builder.add_edge("one", "two") + builder.add_edge("two", END) + + saver = MemorySaver() + + graph = builder.compile(checkpointer=saver) + + config = {"configurable": {"thread_id": "1"}} + graph.invoke({"messages": []}, config=config) + + # re-run step: 1 + target_config = next( + c.parent_config for c in saver.list(config) if c.metadata["step"] == 1 + ) + update_config = graph.update_state(target_config, values=None) + + events = [*graph.stream(None, config=update_config, stream_mode="debug")] + + checkpoint_events = list( + reversed([e["payload"] for e in events if e["type"] == "checkpoint"]) + ) + + checkpoint_history = { + c.config["configurable"]["checkpoint_id"]: c + for c in graph.get_state_history(config) + } + + def lax_normalize_config(config: Optional[dict]) -> Optional[dict]: + if config is None: + return None + return config["configurable"] + + for stream in checkpoint_events: + stream_conf = lax_normalize_config(stream["config"]) + stream_parent_conf = lax_normalize_config(stream["parent_config"]) + assert stream_conf != stream_parent_conf + + # ensure the streamed checkpoint == checkpoint from checkpointer.list() + history = checkpoint_history[stream["config"]["configurable"]["checkpoint_id"]] + history_conf = lax_normalize_config(history.config) + assert 
stream_conf == history_conf + + history_parent_conf = lax_normalize_config(history.parent_config) + assert stream_parent_conf == history_parent_conf + + +def test_debug_subgraphs(): + class State(TypedDict): + messages: Annotated[list[str], operator.add] + + def node(name): + def _node(state: State): + return {"messages": [f"entered {name} node"]} + + return _node + + parent = StateGraph(State) + child = StateGraph(State) + + child.add_node("c_one", node("c_one")) + child.add_node("c_two", node("c_two")) + child.add_edge(START, "c_one") + child.add_edge("c_one", "c_two") + child.add_edge("c_two", END) + + parent.add_node("p_one", node("p_one")) + parent.add_node("p_two", child.compile()) + parent.add_edge(START, "p_one") + parent.add_edge("p_one", "p_two") + parent.add_edge("p_two", END) + + graph = parent.compile(checkpointer=MemorySaver()) + + config = {"configurable": {"thread_id": "1"}} + events = [ + *graph.stream( + {"messages": []}, + config=config, + stream_mode="debug", + ) + ] + + checkpoint_events = list( + reversed([e["payload"] for e in events if e["type"] == "checkpoint"]) + ) + checkpoint_history = list(graph.get_state_history(config)) + + assert len(checkpoint_events) == len(checkpoint_history) + + def lax_normalize_config(config: Optional[dict]) -> Optional[dict]: + if config is None: + return None + return config["configurable"] + + for stream, history in zip(checkpoint_events, checkpoint_history): + assert stream["values"] == history.values + assert stream["next"] == list(history.next) + assert lax_normalize_config(stream["config"]) == lax_normalize_config( + history.config + ) + assert lax_normalize_config(stream["parent_config"]) == lax_normalize_config( + history.parent_config + ) + + assert len(stream["tasks"]) == len(history.tasks) + for stream_task, history_task in zip(stream["tasks"], history.tasks): + assert stream_task["id"] == history_task.id + assert stream_task["name"] == history_task.name + assert stream_task["interrupts"] == 
history_task.interrupts
+            assert stream_task.get("error") == history_task.error
+            assert stream_task.get("state") == history_task.state
+
+
+def test_debug_nested_subgraphs():
+    from collections import defaultdict
+
+    class State(TypedDict):
+        messages: Annotated[list[str], operator.add]
+
+    def node(name):
+        def _node(state: State):
+            return {"messages": [f"entered {name} node"]}
+
+        return _node
+
+    grand_parent = StateGraph(State)
+    parent = StateGraph(State)
+    child = StateGraph(State)
+
+    child.add_node("c_one", node("c_one"))
+    child.add_node("c_two", node("c_two"))
+    child.add_edge(START, "c_one")
+    child.add_edge("c_one", "c_two")
+    child.add_edge("c_two", END)
+
+    parent.add_node("p_one", node("p_one"))
+    parent.add_node("p_two", child.compile())
+    parent.add_edge(START, "p_one")
+    parent.add_edge("p_one", "p_two")
+    parent.add_edge("p_two", END)
+
+    grand_parent.add_node("gp_one", node("gp_one"))
+    grand_parent.add_node("gp_two", parent.compile())
+    grand_parent.add_edge(START, "gp_one")
+    grand_parent.add_edge("gp_one", "gp_two")
+    grand_parent.add_edge("gp_two", END)
+
+    graph = grand_parent.compile(checkpointer=MemorySaver())
+
+    config = {"configurable": {"thread_id": "1"}}
+    events = [
+        *graph.stream(
+            {"messages": []},
+            config=config,
+            stream_mode="debug",
+            subgraphs=True,
+        )
+    ]
+
+    stream_ns: dict[tuple, list] = defaultdict(list)
+    for ns, e in events:
+        if e["type"] == "checkpoint":
+            stream_ns[ns].append(e["payload"])
+
+    assert list(stream_ns.keys()) == [
+        (),
+        (AnyStr("gp_two:"),),
+        (AnyStr("gp_two:"), AnyStr("p_two:")),
+    ]
+
+    history_ns = {
+        ns: list(
+            graph.get_state_history(
+                {"configurable": {"thread_id": "1", "checkpoint_ns": "|".join(ns)}}
+            )
+        )[::-1]
+        for ns in stream_ns.keys()
+    }
+
+    def normalize_config(config: Optional[dict]) -> Optional[dict]:
+        if config is None:
+            return None
+
+        clean_config = {}
+        clean_config["thread_id"] = config["configurable"]["thread_id"]
+        clean_config["checkpoint_id"] = 
config["configurable"]["checkpoint_id"] + clean_config["checkpoint_ns"] = config["configurable"]["checkpoint_ns"] + if "checkpoint_map" in config["configurable"]: + clean_config["checkpoint_map"] = config["configurable"]["checkpoint_map"] + + return clean_config + + for checkpoint_events, checkpoint_history in zip( + stream_ns.values(), history_ns.values() + ): + for stream, history in zip(checkpoint_events, checkpoint_history): + assert stream["values"] == history.values + assert stream["next"] == list(history.next) + assert normalize_config(stream["config"]) == normalize_config( + history.config + ) + assert normalize_config(stream["parent_config"]) == normalize_config( + history.parent_config + ) + + assert len(stream["tasks"]) == len(history.tasks) + for stream_task, history_task in zip(stream["tasks"], history.tasks): + assert stream_task["id"] == history_task.id + assert stream_task["name"] == history_task.name + assert stream_task["interrupts"] == history_task.interrupts + assert stream_task.get("error") == history_task.error + assert stream_task.get("state") == history_task.state + + +def test_add_sequence(): + class State(TypedDict): + foo: Annotated[list[str], operator.add] + bar: str + + def step1(state: State): + return {"foo": ["step1"], "bar": "baz"} + + def step2(state: State): + return {"foo": ["step2"]} + + # test raising if less than 1 steps + with pytest.raises(ValueError): + StateGraph(State).add_sequence([]) + + # test raising if duplicate step names + with pytest.raises(ValueError): + StateGraph(State).add_sequence([step1, step1]) + + with pytest.raises(ValueError): + StateGraph(State).add_sequence([("foo", step1), ("foo", step1)]) + + # test unnamed steps + builder = StateGraph(State) + builder.add_sequence([step1, step2]) + builder.add_edge(START, "step1") + graph = builder.compile() + result = graph.invoke({"foo": []}) + assert result == {"foo": ["step1", "step2"], "bar": "baz"} + stream_chunks = list(graph.stream({"foo": []})) + assert 
stream_chunks == [ + {"step1": {"foo": ["step1"], "bar": "baz"}}, + {"step2": {"foo": ["step2"]}}, + ] + + # test named steps + builder_named_steps = StateGraph(State) + builder_named_steps.add_sequence([("meow1", step1), ("meow2", step2)]) + builder_named_steps.add_edge(START, "meow1") + graph_named_steps = builder_named_steps.compile() + result = graph_named_steps.invoke({"foo": []}) + stream_chunks = list(graph_named_steps.stream({"foo": []})) + assert result == {"foo": ["step1", "step2"], "bar": "baz"} + assert stream_chunks == [ + {"meow1": {"foo": ["step1"], "bar": "baz"}}, + {"meow2": {"foo": ["step2"]}}, + ] + + builder_named_steps = StateGraph(State) + builder_named_steps.add_sequence( + [ + ("meow1", lambda state: {"foo": ["foo"]}), + ("meow2", lambda state: {"bar": state["foo"][0] + "bar"}), + ], + ) + builder_named_steps.add_edge(START, "meow1") + graph_named_steps = builder_named_steps.compile() + result = graph_named_steps.invoke({"foo": []}) + stream_chunks = list(graph_named_steps.stream({"foo": []})) + # filtered by output schema + assert result == {"bar": "foobar", "foo": ["foo"]} + assert stream_chunks == [ + {"meow1": {"foo": ["foo"]}}, + {"meow2": {"bar": "foobar"}}, + ] + + # test two sequences + + def a(state: State): + return {"foo": ["a"]} + + def b(state: State): + return {"foo": ["b"]} + + builder_two_sequences = StateGraph(State) + builder_two_sequences.add_sequence([a]) + builder_two_sequences.add_sequence([b]) + builder_two_sequences.add_edge(START, "a") + builder_two_sequences.add_edge("a", "b") + graph_two_sequences = builder_two_sequences.compile() + + result = graph_two_sequences.invoke({"foo": []}) + assert result == {"foo": ["a", "b"]} + + stream_chunks = list(graph_two_sequences.stream({"foo": []})) + assert stream_chunks == [ + {"a": {"foo": ["a"]}}, + {"b": {"foo": ["b"]}}, + ] + + # test mixed nodes and sequences + + def c(state: State): + return {"foo": ["c"]} + + def d(state: State): + return {"foo": ["d"]} + + def e(state: 
State): + return {"foo": ["e"]} + + def foo(state: State): + if state["foo"][0] == "a": + return "d" + else: + return "c" + + builder_complex = StateGraph(State) + builder_complex.add_sequence([a, b]) + builder_complex.add_conditional_edges("b", foo) + builder_complex.add_node(c) + builder_complex.add_sequence([d, e]) + builder_complex.add_edge(START, "a") + graph_complex = builder_complex.compile() + + result = graph_complex.invoke({"foo": []}) + assert result == {"foo": ["a", "b", "d", "e"]} + + result = graph_complex.invoke({"foo": ["start"]}) + assert result == {"foo": ["start", "a", "b", "c"]} + + stream_chunks = list(graph_complex.stream({"foo": []})) + assert stream_chunks == [ + {"a": {"foo": ["a"]}}, + {"b": {"foo": ["b"]}}, + {"d": {"foo": ["d"]}}, + {"e": {"foo": ["e"]}}, + ] + + +def test_runnable_passthrough_node_graph() -> None: + class State(TypedDict): + changeme: str + + async def dummy(state): + return state + + agent = dummy | RunnablePassthrough.assign(prediction=RunnableLambda(lambda x: x)) + + graph_builder = StateGraph(State) + + graph_builder.add_node("agent", agent) + graph_builder.add_edge(START, "agent") + + graph = graph_builder.compile() + + assert graph.get_graph(xray=True).to_json() == graph.get_graph(xray=False).to_json() diff --git a/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_pregel_async.py b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_pregel_async.py new file mode 100644 index 0000000..af863a1 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/integration_tests/test_pregel_async.py @@ -0,0 +1,12573 @@ +import asyncio +import operator +import random +import re +import sys +import uuid +from collections import Counter +from contextlib import asynccontextmanager, contextmanager +from dataclasses import replace +from time import perf_counter +from typing import ( + Annotated, + Any, + AsyncGenerator, + AsyncIterator, + Dict, + Generator, + List, + Literal, + Optional, + Tuple, + 
TypedDict, + Union, + cast, +) +from uuid import UUID + +import httpx +import pytest +from langchain_core.messages import ToolCall +from langchain_core.runnables import ( + RunnableConfig, + RunnableLambda, + RunnablePassthrough, + RunnablePick, +) +from langchain_core.utils.aiter import aclosing +from pydantic import BaseModel +from pytest_mock import MockerFixture +from syrupy import SnapshotAssertion + +from langgraph.channels.base import BaseChannel +from langgraph.channels.binop import BinaryOperatorAggregate +from langgraph.channels.context import Context +from langgraph.channels.last_value import LastValue +from langgraph.channels.topic import Topic +from langgraph.channels.untracked_value import UntrackedValue +from langgraph.checkpoint.base import ( + ChannelVersions, + Checkpoint, + CheckpointMetadata, + CheckpointTuple, +) +from langgraph.checkpoint.memory import MemorySaver +from langgraph.constants import ( + CONFIG_KEY_NODE_FINISHED, + ERROR, + FF_SEND_V2, + PULL, + PUSH, + START, +) +from langgraph.errors import InvalidUpdateError, MultipleSubgraphsError, NodeInterrupt +from langgraph.graph import END, Graph, GraphCommand, StateGraph +from langgraph.graph.message import MessageGraph, MessagesState, add_messages +from langgraph.managed.shared_value import SharedValue +from langgraph.prebuilt.chat_agent_executor import create_tool_calling_executor +from langgraph.prebuilt.tool_node import ToolNode +from langgraph.pregel import Channel, GraphRecursionError, Pregel, StateSnapshot +from langgraph.pregel.retry import RetryPolicy +from langgraph.store.base import BaseStore +from langgraph.store.memory import InMemoryStore +from langgraph.types import ( + Command, + Interrupt, + PregelTask, + Send, + StreamWriter, + interrupt, +) + +from .any_str import AnyDict, AnyStr, AnyVersion, FloatBetween, UnsortedSequence +from .conftest import ( + ALL_CHECKPOINTERS_ASYNC, + ALL_CHECKPOINTERS_ASYNC_PLUS_NONE, + ALL_STORES_ASYNC, + SHOULD_CHECK_SNAPSHOTS, + 
awith_checkpointer, + awith_store, +) +from .fake_chat import FakeChatModel +from .fake_tracer import FakeTracer +from .memory_assert import ( + MemorySaverAssertCheckpointMetadata, + MemorySaverNoPending, +) +from .messages import ( + _AnyIdAIMessage, + _AnyIdAIMessageChunk, + _AnyIdHumanMessage, + _AnyIdToolMessage, +) + +pytestmark = pytest.mark.anyio + + +async def test_checkpoint_errors() -> None: + class FaultyGetCheckpointer(MemorySaver): + async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + raise ValueError("Faulty get_tuple") + + class FaultyPutCheckpointer(MemorySaver): + async def aput( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + raise ValueError("Faulty put") + + class FaultyPutWritesCheckpointer(MemorySaver): + async def aput_writes( + self, config: RunnableConfig, writes: List[Tuple[str, Any]], task_id: str + ) -> RunnableConfig: + raise ValueError("Faulty put_writes") + + class FaultyVersionCheckpointer(MemorySaver): + def get_next_version(self, current: Optional[int], channel: BaseChannel) -> int: + raise ValueError("Faulty get_next_version") + + def logic(inp: str) -> str: + return "" + + builder = StateGraph(Annotated[str, operator.add]) + builder.add_node("agent", logic) + builder.add_edge(START, "agent") + + graph = builder.compile(checkpointer=FaultyGetCheckpointer()) + with pytest.raises(ValueError, match="Faulty get_tuple"): + await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}}) + with pytest.raises(ValueError, match="Faulty get_tuple"): + async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}): + pass + with pytest.raises(ValueError, match="Faulty get_tuple"): + async for _ in graph.astream_events( + "", {"configurable": {"thread_id": "thread-3"}}, version="v2" + ): + pass + + graph = builder.compile(checkpointer=FaultyPutCheckpointer()) + with pytest.raises(ValueError, 
match="Faulty put"): + await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}}) + with pytest.raises(ValueError, match="Faulty put"): + async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}): + pass + with pytest.raises(ValueError, match="Faulty put"): + async for _ in graph.astream_events( + "", {"configurable": {"thread_id": "thread-3"}}, version="v2" + ): + pass + + graph = builder.compile(checkpointer=FaultyVersionCheckpointer()) + with pytest.raises(ValueError, match="Faulty get_next_version"): + await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}}) + with pytest.raises(ValueError, match="Faulty get_next_version"): + async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}): + pass + with pytest.raises(ValueError, match="Faulty get_next_version"): + async for _ in graph.astream_events( + "", {"configurable": {"thread_id": "thread-3"}}, version="v2" + ): + pass + + # add a parallel node + builder.add_node("parallel", logic) + builder.add_edge(START, "parallel") + graph = builder.compile(checkpointer=FaultyPutWritesCheckpointer()) + with pytest.raises(ValueError, match="Faulty put_writes"): + await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}}) + with pytest.raises(ValueError, match="Faulty put_writes"): + async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}): + pass + with pytest.raises(ValueError, match="Faulty put_writes"): + async for _ in graph.astream_events( + "", {"configurable": {"thread_id": "thread-3"}}, version="v2" + ): + pass + + +async def test_node_cancellation_on_external_cancel() -> None: + inner_task_cancelled = False + + async def awhile(input: Any) -> None: + try: + await asyncio.sleep(1) + except asyncio.CancelledError: + nonlocal inner_task_cancelled + inner_task_cancelled = True + raise + + builder = Graph() + builder.add_node("agent", awhile) + builder.set_entry_point("agent") + builder.set_finish_point("agent") + + graph = 
builder.compile() + + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(graph.ainvoke(1), 0.5) + + assert inner_task_cancelled + + +async def test_node_cancellation_on_other_node_exception() -> None: + inner_task_cancelled = False + + async def awhile(input: Any) -> None: + try: + await asyncio.sleep(1) + except asyncio.CancelledError: + nonlocal inner_task_cancelled + inner_task_cancelled = True + raise + + async def iambad(input: Any) -> None: + raise ValueError("I am bad") + + builder = Graph() + builder.add_node("agent", awhile) + builder.add_node("bad", iambad) + builder.set_conditional_entry_point(lambda _: ["agent", "bad"], then=END) + + graph = builder.compile() + + with pytest.raises(ValueError, match="I am bad"): + # This will raise ValueError, not TimeoutError + await asyncio.wait_for(graph.ainvoke(1), 0.5) + + assert inner_task_cancelled + + +async def test_node_cancellation_on_other_node_exception_two() -> None: + async def awhile(input: Any) -> None: + await asyncio.sleep(1) + + async def iambad(input: Any) -> None: + raise ValueError("I am bad") + + builder = Graph() + builder.add_node("agent", awhile) + builder.add_node("bad", iambad) + builder.set_conditional_entry_point(lambda _: ["agent", "bad"], then=END) + + graph = builder.compile() + + with pytest.raises(ValueError, match="I am bad"): + # This will raise ValueError, not CancelledError + await graph.ainvoke(1) + + +@pytest.mark.skipif( + sys.version_info < (3, 11), + reason="Python 3.11+ is required for async contextvars support", +) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_dynamic_interrupt(checkpointer_name: str) -> None: + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + tool_two_node_count = 0 + + async def tool_two_node(s: State) -> State: + nonlocal tool_two_node_count + tool_two_node_count += 1 + if s["market"] == "DE": + answer = interrupt("Just because...") + else: + answer = " all good" + 
return {"my_key": answer} + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy()) + tool_two_graph.add_edge(START, "tool_two") + tool_two = tool_two_graph.compile() + + tracer = FakeTracer() + assert await tool_two.ainvoke( + {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]} + ) == { + "my_key": "value", + "market": "DE", + } + assert tool_two_node_count == 1, "interrupts aren't retried" + assert len(tracer.runs) == 1 + run = tracer.runs[0] + assert run.end_time is not None + assert run.error is None + assert run.outputs == {"market": "DE", "my_key": "value"} + + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == { + "my_key": "value all good", + "market": "US", + } + + async with awith_checkpointer(checkpointer_name) as checkpointer: + tool_two = tool_two_graph.compile(checkpointer=checkpointer) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + await tool_two.ainvoke({"my_key": "value", "market": "DE"}) + + # flow: interrupt -> resume with answer + thread2 = {"configurable": {"thread_id": "2"}} + # stop when about to enter node + assert [ + c + async for c in tool_two.astream( + {"my_key": "value ⛰️", "market": "DE"}, thread2 + ) + ] == [ + { + "__interrupt__": ( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ) + }, + ] + # resume with answer + assert [ + c async for c in tool_two.astream(Command(resume=" my answer"), thread2) + ] == [ + {"tool_two": {"my_key": " my answer"}}, + ] + + # flow: interrupt -> clear + thread1 = {"configurable": {"thread_id": "1"}} + # stop when about to enter node + assert [ + c + async for c in tool_two.astream( + {"my_key": "value ⛰️", "market": "DE"}, thread1 + ) + ] == [ + { + "__interrupt__": ( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ) + }, + ] + assert [c.metadata async for c in tool_two.checkpointer.alist(thread1)] == 
[ + { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}}, + "thread_id": "1", + }, + ] + tup = await tool_two.checkpointer.aget_tuple(thread1) + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️", "market": "DE"}, + next=("tool_two",), + tasks=( + PregelTask( + AnyStr(), + "tool_two", + (PULL, "tool_two"), + interrupts=( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ), + ), + ), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + + # clear the interrupt and next tasks + await tool_two.aupdate_state(thread1, None, as_node=END) + # interrupt is cleared, as well as the next tasks + tup = await tool_two.checkpointer.aget_tuple(thread1) + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️", "market": "DE"}, + next=(), + tasks=(), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": {}, + "thread_id": "1", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + + +@pytest.mark.skipif(not FF_SEND_V2, reason="send v2 is not enabled") +@pytest.mark.skipif( + sys.version_info < (3, 11), + reason="Python 3.11+ is required for async contextvars support", +) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_copy_checkpoint(checkpointer_name: str) -> None: + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + def tool_one(s: State) -> State: + return {"my_key": " one"} + + 
tool_two_node_count = 0 + + def tool_two_node(s: State) -> State: + nonlocal tool_two_node_count + tool_two_node_count += 1 + if s["market"] == "DE": + answer = interrupt("Just because...") + else: + answer = " all good" + return {"my_key": answer} + + def start(state: State) -> list[Union[Send, str]]: + return ["tool_two", Send("tool_one", state)] + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy()) + tool_two_graph.add_node("tool_one", tool_one) + tool_two_graph.set_conditional_entry_point(start) + tool_two = tool_two_graph.compile() + + tracer = FakeTracer() + assert await tool_two.ainvoke( + {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]} + ) == { + "my_key": "value one", + "market": "DE", + } + assert tool_two_node_count == 1, "interrupts aren't retried" + assert len(tracer.runs) == 1 + run = tracer.runs[0] + assert run.end_time is not None + assert run.error is None + assert run.outputs == {"market": "DE", "my_key": "value one"} + + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == { + "my_key": "value one all good", + "market": "US", + } + + async with awith_checkpointer(checkpointer_name) as checkpointer: + tool_two = tool_two_graph.compile(checkpointer=checkpointer) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + await tool_two.ainvoke({"my_key": "value", "market": "DE"}) + + # flow: interrupt -> resume with answer + thread2 = {"configurable": {"thread_id": "2"}} + # stop when about to enter node + assert [ + c + async for c in tool_two.astream( + {"my_key": "value ⛰️", "market": "DE"}, thread2 + ) + ] == [ + { + "tool_one": {"my_key": " one"}, + }, + { + "__interrupt__": ( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ) + }, + ] + # resume with answer + assert [ + c async for c in tool_two.astream(Command(resume=" my answer"), thread2) + ] == [ + {"tool_two": {"my_key": " my answer"}}, + 
] + + # flow: interrupt -> clear tasks + thread1 = {"configurable": {"thread_id": "1"}} + # stop when about to enter node + assert await tool_two.ainvoke( + {"my_key": "value ⛰️", "market": "DE"}, thread1 + ) == { + "my_key": "value ⛰️ one", + "market": "DE", + } + assert [c.metadata async for c in tool_two.checkpointer.alist(thread1)] == [ + { + "parents": {}, + "source": "loop", + "step": 0, + "writes": {"tool_one": {"my_key": " one"}}, + "thread_id": "1", + }, + { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}}, + "thread_id": "1", + }, + ] + tup = await tool_two.checkpointer.aget_tuple(thread1) + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️ one", "market": "DE"}, + next=("tool_two",), + tasks=( + PregelTask( + AnyStr(), + "tool_two", + (PULL, "tool_two"), + interrupts=( + Interrupt( + value="Just because...", + resumable=True, + ns=[AnyStr("tool_two:")], + ), + ), + ), + ), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": {"tool_one": {"my_key": " one"}}, + "thread_id": "1", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + # clear the interrupt and next tasks + await tool_two.aupdate_state(thread1, None) + # interrupt is cleared, next task is kept + tup = await tool_two.checkpointer.aget_tuple(thread1) + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value ⛰️ one", "market": "DE"}, + next=("tool_two",), + tasks=( + PregelTask( + AnyStr(), + "tool_two", + (PULL, "tool_two"), + interrupts=(), + ), + ), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": {}, + "thread_id": "1", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + + 
+@pytest.mark.skipif( + sys.version_info < (3, 11), + reason="Python 3.11+ is required for async contextvars support", +) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_node_not_cancelled_on_other_node_interrupted( + checkpointer_name: str, +) -> None: + class State(TypedDict): + hello: Annotated[str, operator.add] + + awhiles = 0 + inner_task_cancelled = False + + async def awhile(input: State) -> None: + nonlocal awhiles + + awhiles += 1 + try: + await asyncio.sleep(1) + return {"hello": " again"} + except asyncio.CancelledError: + nonlocal inner_task_cancelled + inner_task_cancelled = True + raise + + async def iambad(input: State) -> None: + return {"hello": interrupt("I am bad")} + + builder = StateGraph(State) + builder.add_node("agent", awhile) + builder.add_node("bad", iambad) + builder.set_conditional_entry_point(lambda _: ["agent", "bad"], then=END) + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + thread = {"configurable": {"thread_id": "1"}} + + # writes from "awhile" are applied to last chunk + assert await graph.ainvoke({"hello": "world"}, thread) == { + "hello": "world again" + } + + assert not inner_task_cancelled + assert awhiles == 1 + + assert await graph.ainvoke(None, thread, debug=True) == {"hello": "world again"} + + assert not inner_task_cancelled + assert awhiles == 1 + + # resume with answer + assert await graph.ainvoke(Command(resume=" okay"), thread) == { + "hello": "world again okay" + } + + assert not inner_task_cancelled + assert awhiles == 1 + + +@pytest.mark.repeat(10) +async def test_step_timeout_on_stream_hang() -> None: + inner_task_cancelled = False + + async def awhile(input: Any) -> None: + try: + await asyncio.sleep(1.5) + except asyncio.CancelledError: + nonlocal inner_task_cancelled + inner_task_cancelled = True + raise + + async def alittlewhile(input: Any) -> None: + await asyncio.sleep(0.6) + return "1" + + 
builder = Graph() + builder.add_node(awhile) + builder.add_node(alittlewhile) + builder.set_conditional_entry_point(lambda _: ["awhile", "alittlewhile"], then=END) + graph = builder.compile() + graph.step_timeout = 1 + + with pytest.raises(asyncio.TimeoutError): + async for chunk in graph.astream(1, stream_mode="updates"): + assert chunk == {"alittlewhile": {"alittlewhile": "1"}} + await asyncio.sleep(0.6) + + assert inner_task_cancelled + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC_PLUS_NONE) +async def test_cancel_graph_astream(checkpointer_name: str) -> None: + class State(TypedDict): + value: Annotated[int, operator.add] + + class AwhileMaker: + def __init__(self) -> None: + self.reset() + + async def __call__(self, input: State) -> Any: + self.started = True + try: + await asyncio.sleep(1.5) + except asyncio.CancelledError: + self.cancelled = True + raise + + def reset(self): + self.started = False + self.cancelled = False + + async def alittlewhile(input: State) -> None: + await asyncio.sleep(0.6) + return {"value": 2} + + awhile = AwhileMaker() + aparallelwhile = AwhileMaker() + builder = StateGraph(State) + builder.add_node("awhile", awhile) + builder.add_node("aparallelwhile", aparallelwhile) + builder.add_node(alittlewhile) + builder.add_edge(START, "alittlewhile") + builder.add_edge(START, "aparallelwhile") + builder.add_edge("alittlewhile", "awhile") + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + + # test interrupting astream + got_event = False + thread1: RunnableConfig = {"configurable": {"thread_id": "1"}} + async with aclosing(graph.astream({"value": 1}, thread1)) as stream: + async for chunk in stream: + assert chunk == {"alittlewhile": {"value": 2}} + got_event = True + break + + assert got_event + + # node aparallelwhile should start, but be cancelled + assert aparallelwhile.started is True + assert aparallelwhile.cancelled is True + + # 
node "awhile" should never start + assert awhile.started is False + + # checkpoint with output of "alittlewhile" should not be saved + # but we should have applied pending writes + if checkpointer is not None: + state = await graph.aget_state(thread1) + assert state is not None + assert state.values == {"value": 3} # 1 + 2 + assert state.next == ("aparallelwhile",) + assert state.metadata == { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + } + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC_PLUS_NONE) +async def test_cancel_graph_astream_events_v2(checkpointer_name: Optional[str]) -> None: + class State(TypedDict): + value: int + + class AwhileMaker: + def __init__(self) -> None: + self.reset() + + async def __call__(self, input: State) -> Any: + self.started = True + try: + await asyncio.sleep(1.5) + except asyncio.CancelledError: + self.cancelled = True + raise + + def reset(self): + self.started = False + self.cancelled = False + + async def alittlewhile(input: State) -> None: + await asyncio.sleep(0.6) + return {"value": 2} + + awhile = AwhileMaker() + anotherwhile = AwhileMaker() + builder = StateGraph(State) + builder.add_node(alittlewhile) + builder.add_node("awhile", awhile) + builder.add_node("anotherwhile", anotherwhile) + builder.add_edge(START, "alittlewhile") + builder.add_edge("alittlewhile", "awhile") + builder.add_edge("awhile", "anotherwhile") + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + + # test interrupting astream_events v2 + got_event = False + thread2: RunnableConfig = {"configurable": {"thread_id": "2"}} + async with aclosing( + graph.astream_events({"value": 1}, thread2, version="v2") + ) as stream: + async for chunk in stream: + if chunk["event"] == "on_chain_stream" and not chunk["parent_ids"]: + got_event = True + assert chunk["data"]["chunk"] == {"alittlewhile": {"value": 2}} + await 
asyncio.sleep(0.1) + break + + # did break + assert got_event + + # node "awhile" maybe starts (impl detail of astream_events) + # if it does start, it must be cancelled + if awhile.started: + assert awhile.cancelled is True + + # node "anotherwhile" should never start + assert anotherwhile.started is False + + # checkpoint with output of "alittlewhile" should not be saved + if checkpointer is not None: + state = await graph.aget_state(thread2) + assert state is not None + assert state.values == {"value": 2} + assert state.next == ("awhile",) + assert state.metadata == { + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"alittlewhile": {"value": 2}}, + "thread_id": "2", + } + + +async def test_node_schemas_custom_output() -> None: + class State(TypedDict): + hello: str + bye: str + messages: Annotated[list[str], add_messages] + + class Output(TypedDict): + messages: list[str] + + class StateForA(TypedDict): + hello: str + messages: Annotated[list[str], add_messages] + + async def node_a(state: StateForA): + assert state == { + "hello": "there", + "messages": [_AnyIdHumanMessage(content="hello")], + } + + class StateForB(TypedDict): + bye: str + now: int + + async def node_b(state: StateForB): + assert state == { + "bye": "world", + } + return { + "now": 123, + "hello": "again", + } + + class StateForC(TypedDict): + hello: str + now: int + + async def node_c(state: StateForC): + assert state == { + "hello": "again", + "now": 123, + } + + builder = StateGraph(State, output=Output) + builder.add_node("a", node_a) + builder.add_node("b", node_b) + builder.add_node("c", node_c) + builder.add_edge(START, "a") + builder.add_edge("a", "b") + builder.add_edge("b", "c") + graph = builder.compile() + + assert await graph.ainvoke( + {"hello": "there", "bye": "world", "messages": "hello"} + ) == { + "messages": [_AnyIdHumanMessage(content="hello")], + } + + builder = StateGraph(State, output=Output) + builder.add_node("a", node_a) + builder.add_node("b", node_b) + 
builder.add_node("c", node_c) + builder.add_edge(START, "a") + builder.add_edge("a", "b") + builder.add_edge("b", "c") + graph = builder.compile() + + assert await graph.ainvoke( + { + "hello": "there", + "bye": "world", + "messages": "hello", + "now": 345, # ignored because not in input schema + } + ) == { + "messages": [_AnyIdHumanMessage(content="hello")], + } + + assert [ + c + async for c in graph.astream( + { + "hello": "there", + "bye": "world", + "messages": "hello", + "now": 345, # ignored because not in input schema + } + ) + ] == [ + {"a": None}, + {"b": {"hello": "again", "now": 123}}, + {"c": None}, + ] + + +async def test_invoke_single_process_in_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={ + "one": chain, + }, + channels={ + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + graph = Graph() + graph.add_node("add_one", add_one) + graph.set_entry_point("add_one") + graph.set_finish_point("add_one") + gapp = graph.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "integer", + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "integer", + } + assert await app.ainvoke(2) == 3 + assert await app.ainvoke(2, output_keys=["output"]) == {"output": 3} + + assert await gapp.ainvoke(2) == 3 + + +@pytest.mark.parametrize( + "falsy_value", + [None, False, 0, "", [], {}, set(), frozenset(), 0.0, 0j], +) +async def test_invoke_single_process_in_out_falsy_values(falsy_value: Any) -> None: + graph = Graph() + graph.add_node("return_falsy_const", lambda *args, **kwargs: falsy_value) + graph.set_entry_point("return_falsy_const") + graph.set_finish_point("return_falsy_const") + gapp = graph.compile() + assert falsy_value == await 
gapp.ainvoke(1) + + +async def test_invoke_single_process_in_write_kwargs(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = ( + Channel.subscribe_to("input") + | add_one + | Channel.write_to("output", fixed=5, output_plus_one=lambda x: x + 1) + ) + + app = Pregel( + nodes={"one": chain}, + channels={ + "input": LastValue(int), + "output": LastValue(int), + "fixed": LastValue(int), + "output_plus_one": LastValue(int), + }, + output_channels=["output", "fixed", "output_plus_one"], + input_channels="input", + ) + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "integer", + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "object", + "properties": { + "output": {"title": "Output", "type": "integer", "default": None}, + "fixed": {"title": "Fixed", "type": "integer", "default": None}, + "output_plus_one": { + "title": "Output Plus One", + "type": "integer", + "default": None, + }, + }, + } + assert await app.ainvoke(2) == {"output": 3, "fixed": 5, "output_plus_one": 4} + + +async def test_invoke_single_process_in_out_dict(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": chain}, + channels={"input": LastValue(int), "output": LastValue(int)}, + input_channels="input", + output_channels=["output"], + ) + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "integer", + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "object", + "properties": { + "output": {"title": "Output", "type": "integer", "default": None} + }, + } + assert await app.ainvoke(2) == {"output": 3} + + +async def test_invoke_single_process_in_dict_out_dict(mocker: MockerFixture) -> None: + 
add_one = mocker.Mock(side_effect=lambda x: x + 1) + chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": chain}, + channels={"input": LastValue(int), "output": LastValue(int)}, + input_channels=["input"], + output_channels=["output"], + ) + + if SHOULD_CHECK_SNAPSHOTS: + assert app.input_schema.model_json_schema() == { + "title": "LangGraphInput", + "type": "object", + "properties": { + "input": {"title": "Input", "type": "integer", "default": None} + }, + } + assert app.output_schema.model_json_schema() == { + "title": "LangGraphOutput", + "type": "object", + "properties": { + "output": {"title": "Output", "type": "integer", "default": None} + }, + } + assert await app.ainvoke({"input": 2}) == {"output": 3} + + +async def test_invoke_two_processes_in_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + stream_channels=["inbox", "output"], + ) + + assert await app.ainvoke(2) == 4 + + with pytest.raises(GraphRecursionError): + await app.ainvoke(2, {"recursion_limit": 1}) + + step = 0 + async for values in app.astream(2): + step += 1 + if step == 1: + assert values == { + "inbox": 3, + } + elif step == 2: + assert values == { + "inbox": 3, + "output": 4, + } + assert step == 2 + + graph = Graph() + graph.add_node("add_one", add_one) + graph.add_node("add_one_more", add_one) + graph.set_entry_point("add_one") + graph.set_finish_point("add_one_more") + graph.add_edge("add_one", "add_one_more") + gapp = graph.compile() + + assert await gapp.ainvoke(2) == 4 + + step = 0 + async for values in gapp.astream(2): + step += 1 + 
if step == 1: + assert values == { + "add_one": 3, + } + elif step == 2: + assert values == { + "add_one_more": 4, + } + assert step == 2 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_invoke_two_processes_in_out_interrupt( + checkpointer_name: str, mocker: MockerFixture +) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output") + async with awith_checkpointer(checkpointer_name) as checkpointer: + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + checkpointer=checkpointer, + interrupt_after_nodes=["one"], + ) + thread1 = {"configurable": {"thread_id": "1"}} + thread2 = {"configurable": {"thread_id": "2"}} + + # start execution, stop at inbox + assert await app.ainvoke(2, thread1) is None + + # inbox == 3 + checkpoint = await checkpointer.aget(thread1) + assert checkpoint is not None + assert checkpoint["channel_values"]["inbox"] == 3 + + # resume execution, finish + assert await app.ainvoke(None, thread1) == 4 + + # start execution again, stop at inbox + assert await app.ainvoke(20, thread1) is None + + # inbox == 21 + checkpoint = await checkpointer.aget(thread1) + assert checkpoint is not None + assert checkpoint["channel_values"]["inbox"] == 21 + + # send a new value in, interrupting the previous execution + assert await app.ainvoke(3, thread1) is None + assert await app.ainvoke(None, thread1) == 5 + + # start execution again, stopping at inbox + assert await app.ainvoke(20, thread2) is None + + # inbox == 21 + snapshot = await app.aget_state(thread2) + assert snapshot.values["inbox"] == 21 + assert snapshot.next == ("two",) + + # update the state, resume + await app.aupdate_state(thread2, 25, as_node="one") + 
assert await app.ainvoke(None, thread2) == 26 + + # no pending tasks + snapshot = await app.aget_state(thread2) + assert snapshot.next == () + + # list history + history = [c async for c in app.aget_state_history(thread1)] + assert history == [ + StateSnapshot( + values={"inbox": 4, "output": 5, "input": 3}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 6, + "writes": {"two": 5}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[1].config, + ), + StateSnapshot( + values={"inbox": 4, "output": 4, "input": 3}, + tasks=( + PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 5}), + ), + next=("two",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 5, + "writes": {"one": None}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[2].config, + ), + StateSnapshot( + values={"inbox": 21, "output": 4, "input": 3}, + tasks=( + PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 4}), + ), + next=("one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": 4, + "writes": {"input": 3}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[3].config, + ), + StateSnapshot( + values={"inbox": 21, "output": 4, "input": 20}, + tasks=(PregelTask(AnyStr(), "two", (PULL, "two")),), + next=("two",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"one": None}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[4].config, + ), + StateSnapshot( + values={"inbox": 3, 
"output": 4, "input": 20}, + tasks=( + PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 21}), + ), + next=("one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": 2, + "writes": {"input": 20}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[5].config, + ), + StateSnapshot( + values={"inbox": 3, "output": 4, "input": 2}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"two": 4}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[6].config, + ), + StateSnapshot( + values={"inbox": 3, "input": 2}, + tasks=( + PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 4}), + ), + next=("two",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": {"one": None}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[7].config, + ), + StateSnapshot( + values={"input": 2}, + tasks=( + PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 3}), + ), + next=("one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": -1, + "writes": {"input": 2}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + + # forking from any previous checkpoint should re-run nodes + assert [ + c async for c in app.astream(None, history[0].config, stream_mode="updates") + ] == [] + assert [ + c async for c in app.astream(None, history[1].config, stream_mode="updates") + ] == [ + {"two": {"output": 5}}, + ] + assert [ + c async for c in app.astream(None, 
history[2].config, stream_mode="updates") + ] == [ + {"one": {"inbox": 4}}, + {"__interrupt__": ()}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_fork_always_re_runs_nodes( + checkpointer_name: str, mocker: MockerFixture +) -> None: + add_one = mocker.Mock(side_effect=lambda _: 1) + + builder = StateGraph(Annotated[int, operator.add]) + builder.add_node("add_one", add_one) + builder.add_edge(START, "add_one") + builder.add_conditional_edges("add_one", lambda cnt: "add_one" if cnt < 6 else END) + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + + thread1 = {"configurable": {"thread_id": "1"}} + + # start execution, stop at inbox + assert [ + c + async for c in graph.astream(1, thread1, stream_mode=["values", "updates"]) + ] == [ + ("values", 1), + ("updates", {"add_one": 1}), + ("values", 2), + ("updates", {"add_one": 1}), + ("values", 3), + ("updates", {"add_one": 1}), + ("values", 4), + ("updates", {"add_one": 1}), + ("values", 5), + ("updates", {"add_one": 1}), + ("values", 6), + ] + + # list history + history = [c async for c in graph.aget_state_history(thread1)] + assert history == [ + StateSnapshot( + values=6, + next=(), + tasks=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 5, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[1].config, + ), + StateSnapshot( + values=5, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[2].config, + ), + 
StateSnapshot( + values=4, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[3].config, + ), + StateSnapshot( + values=3, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[4].config, + ), + StateSnapshot( + values=2, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"add_one": 1}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[5].config, + ), + StateSnapshot( + values=1, + tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),), + next=("add_one",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=history[6].config, + ), + StateSnapshot( + values=0, + tasks=( + PregelTask(AnyStr(), "__start__", (PULL, "__start__"), result=1), + ), + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": 1}, + 
"thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + + # forking from any previous checkpoint should re-run nodes + assert [ + c + async for c in graph.astream(None, history[0].config, stream_mode="updates") + ] == [] + assert [ + c + async for c in graph.astream(None, history[1].config, stream_mode="updates") + ] == [ + {"add_one": 1}, + ] + assert [ + c + async for c in graph.astream(None, history[2].config, stream_mode="updates") + ] == [ + {"add_one": 1}, + {"add_one": 1}, + ] + + +async def test_invoke_two_processes_in_dict_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = ( + Channel.subscribe_to("inbox") + | RunnableLambda(add_one).abatch + | Channel.write_to("output").abatch + ) + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "inbox": Topic(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels=["input", "inbox"], + stream_channels=["output", "inbox"], + output_channels=["output"], + ) + + # [12 + 1, 2 + 1 + 1] + assert [ + c + async for c in app.astream( + {"input": 2, "inbox": 12}, output_keys="output", stream_mode="updates" + ) + ] == [ + {"one": None}, + {"two": 13}, + {"two": 4}, + ] + assert [ + c async for c in app.astream({"input": 2, "inbox": 12}, output_keys="output") + ] == [13, 4] + + assert [ + c async for c in app.astream({"input": 2, "inbox": 12}, stream_mode="updates") + ] == [ + {"one": {"inbox": 3}}, + {"two": {"output": 13}}, + {"two": {"output": 4}}, + ] + assert [c async for c in app.astream({"input": 2, "inbox": 12})] == [ + {"inbox": [3], "output": 13}, + {"output": 4}, + ] + assert [ + c async for c in app.astream({"input": 2, "inbox": 12}, stream_mode="debug") + ] == [ + { + "type": "task", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "one", + "input": 2, + "triggers": ["input"], + }, + }, + { + "type": 
"task", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "two", + "input": [12], + "triggers": ["inbox"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "one", + "result": [("inbox", 3)], + "error": None, + "interrupts": [], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "id": AnyStr(), + "name": "two", + "result": [("output", 13)], + "error": None, + "interrupts": [], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "two", + "input": [3], + "triggers": ["inbox"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "two", + "result": [("output", 4)], + "error": None, + "interrupts": [], + }, + }, + ] + + +async def test_batch_two_processes_in_out() -> None: + async def add_one_with_delay(inp: int) -> int: + await asyncio.sleep(inp / 10) + return inp + 1 + + one = Channel.subscribe_to("input") | add_one_with_delay | Channel.write_to("one") + two = Channel.subscribe_to("one") | add_one_with_delay | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "one": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + assert await app.abatch([3, 2, 1, 3, 5]) == [5, 4, 3, 5, 7] + assert await app.abatch([3, 2, 1, 3, 5], output_keys=["output"]) == [ + {"output": 5}, + {"output": 4}, + {"output": 3}, + {"output": 5}, + {"output": 7}, + ] + + graph = Graph() + graph.add_node("add_one", add_one_with_delay) + graph.add_node("add_one_more", add_one_with_delay) + graph.set_entry_point("add_one") + graph.set_finish_point("add_one_more") + graph.add_edge("add_one", "add_one_more") + gapp = graph.compile() + + assert await gapp.abatch([3, 2, 1, 3, 5]) == [5, 4, 3, 5, 7] + + +async def 
test_invoke_many_processes_in_out(mocker: MockerFixture) -> None: + test_size = 100 + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")} + for i in range(test_size - 2): + nodes[str(i)] = ( + Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i)) + ) + nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output") + + app = Pregel( + nodes=nodes, + channels={str(i): LastValue(int) for i in range(-1, test_size - 2)} + | {"input": LastValue(int), "output": LastValue(int)}, + input_channels="input", + output_channels="output", + ) + + # No state is left over from previous invocations + for _ in range(10): + assert await app.ainvoke(2, {"recursion_limit": test_size}) == 2 + test_size + + # Concurrent invocations do not interfere with each other + assert await asyncio.gather( + *(app.ainvoke(2, {"recursion_limit": test_size}) for _ in range(10)) + ) == [2 + test_size for _ in range(10)] + + +async def test_batch_many_processes_in_out(mocker: MockerFixture) -> None: + test_size = 100 + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")} + for i in range(test_size - 2): + nodes[str(i)] = ( + Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i)) + ) + nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output") + + app = Pregel( + nodes=nodes, + channels={str(i): LastValue(int) for i in range(-1, test_size - 2)} + | {"input": LastValue(int), "output": LastValue(int)}, + input_channels="input", + output_channels="output", + ) + + # No state is left over from previous invocations + for _ in range(3): + # Then invoke pubsub + assert await app.abatch([2, 1, 3, 4, 5], {"recursion_limit": test_size}) == [ + 2 + test_size, + 1 + test_size, + 3 + test_size, + 4 + test_size, + 5 + test_size, + ] + + # Concurrent invocations do not interfere 
with each other + assert await asyncio.gather( + *(app.abatch([2, 1, 3, 4, 5], {"recursion_limit": test_size}) for _ in range(3)) + ) == [ + [2 + test_size, 1 + test_size, 3 + test_size, 4 + test_size, 5 + test_size] + for _ in range(3) + ] + + +async def test_invoke_two_processes_two_in_two_out_invalid( + mocker: MockerFixture, +) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + two = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={"output": LastValue(int), "input": LastValue(int)}, + input_channels="input", + output_channels="output", + ) + + with pytest.raises(InvalidUpdateError): + # LastValue channels can only be updated once per iteration + await app.ainvoke(2) + + +async def test_invoke_two_processes_two_in_two_out_valid(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + two = Channel.subscribe_to("input") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "output": Topic(int), + }, + input_channels="input", + output_channels="output", + ) + + # An Topic channel accumulates updates into a sequence + assert await app.ainvoke(2) == [3, 3] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_invoke_checkpoint(mocker: MockerFixture, checkpointer_name: str) -> None: + add_one = mocker.Mock(side_effect=lambda x: x["total"] + x["input"]) + errored_once = False + + def raise_if_above_10(input: int) -> int: + nonlocal errored_once + if input > 4: + if errored_once: + pass + else: + errored_once = True + raise ConnectionError("I will be retried") + if input > 10: + raise ValueError("Input is too large") + return input + + one = ( + 
Channel.subscribe_to(["input"]).join(["total"]) + | add_one + | Channel.write_to("output", "total") + | raise_if_above_10 + ) + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app = Pregel( + nodes={"one": one}, + channels={ + "total": BinaryOperatorAggregate(int, operator.add), + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + checkpointer=checkpointer, + retry_policy=RetryPolicy(), + ) + + # total starts out as 0, so output is 0+2=2 + assert await app.ainvoke(2, {"configurable": {"thread_id": "1"}}) == 2 + checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 2 + # total is now 2, so output is 2+3=5 + assert await app.ainvoke(3, {"configurable": {"thread_id": "1"}}) == 5 + assert errored_once, "errored and retried" + checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 7 + # total is now 2+5=7, so output would be 7+4=11, but raises ValueError + with pytest.raises(ValueError): + await app.ainvoke(4, {"configurable": {"thread_id": "1"}}) + # checkpoint is not updated + checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 7 + # on a new thread, total starts out as 0, so output is 0+5=5 + assert await app.ainvoke(5, {"configurable": {"thread_id": "2"}}) == 5 + checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 7 + checkpoint = await checkpointer.aget({"configurable": {"thread_id": "2"}}) + assert checkpoint is not None + assert checkpoint["channel_values"].get("total") == 5 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async 
def test_pending_writes_resume( + request: pytest.FixtureRequest, checkpointer_name: str +) -> None: + class State(TypedDict): + value: Annotated[int, operator.add] + + class AwhileMaker: + def __init__(self, sleep: float, rtn: Union[Dict, Exception]) -> None: + self.sleep = sleep + self.rtn = rtn + self.reset() + + async def __call__(self, input: State) -> Any: + self.calls += 1 + await asyncio.sleep(self.sleep) + if isinstance(self.rtn, Exception): + raise self.rtn + else: + return self.rtn + + def reset(self): + self.calls = 0 + + one = AwhileMaker(0.1, {"value": 2}) + two = AwhileMaker(0.3, ConnectionError("I'm not good")) + builder = StateGraph(State) + builder.add_node("one", one) + builder.add_node("two", two, retry=RetryPolicy(max_attempts=2)) + builder.add_edge(START, "one") + builder.add_edge(START, "two") + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + + thread1: RunnableConfig = {"configurable": {"thread_id": "1"}} + with pytest.raises(ConnectionError, match="I'm not good"): + await graph.ainvoke({"value": 1}, thread1) + + # both nodes should have been called once + assert one.calls == 1 + assert two.calls == 2 + + # latest checkpoint should be before nodes "one", "two" + # but we should have applied pending writes from "one" + state = await graph.aget_state(thread1) + assert state is not None + assert state.values == {"value": 3} + assert state.next == ("two",) + assert state.tasks == ( + PregelTask(AnyStr(), "one", (PULL, "one"), result={"value": 2}), + PregelTask( + AnyStr(), + "two", + (PULL, "two"), + 'ConnectionError("I\'m not good")', + ), + ) + assert state.metadata == { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + } + # get_state with checkpoint_id should not apply any pending writes + state = await graph.aget_state(state.config) + assert state is not None + assert state.values == {"value": 1} + assert state.next == ("one", 
"two") + # should contain pending write of "one" + checkpoint = await checkpointer.aget_tuple(thread1) + assert checkpoint is not None + # should contain error from "two" + expected_writes = [ + (AnyStr(), "one", "one"), + (AnyStr(), "value", 2), + (AnyStr(), ERROR, 'ConnectionError("I\'m not good")'), + ] + assert len(checkpoint.pending_writes) == 3 + assert all(w in expected_writes for w in checkpoint.pending_writes) + # both non-error pending writes come from same task + non_error_writes = [w for w in checkpoint.pending_writes if w[1] != ERROR] + assert non_error_writes[0][0] == non_error_writes[1][0] + # error write is from the other task + error_write = next(w for w in checkpoint.pending_writes if w[1] == ERROR) + assert error_write[0] != non_error_writes[0][0] + + # resume execution + with pytest.raises(ConnectionError, match="I'm not good"): + await graph.ainvoke(None, thread1) + + # node "one" succeeded previously, so shouldn't be called again + assert one.calls == 1 + # node "two" should have been called once again + assert two.calls == 4 + + # confirm no new checkpoints saved + state_two = await graph.aget_state(thread1) + assert state_two.metadata == state.metadata + + # resume execution, without exception + two.rtn = {"value": 3} + # both the pending write and the new write were applied, 1 + 2 + 3 = 6 + assert await graph.ainvoke(None, thread1) == {"value": 6} + + # check all final checkpoints + checkpoints = [c async for c in checkpointer.alist(thread1)] + # we should have 3 + assert len(checkpoints) == 3 + # the last one not too interesting for this test + assert checkpoints[0] == CheckpointTuple( + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + checkpoint={ + "v": 1, + "id": AnyStr(), + "ts": AnyStr(), + "pending_sends": [], + "versions_seen": { + "one": { + "start:one": AnyVersion(), + }, + "two": { + "start:two": AnyVersion(), + }, + "__input__": {}, + "__start__": { + "__start__": 
AnyVersion(), + }, + "__interrupt__": { + "value": AnyVersion(), + "__start__": AnyVersion(), + "start:one": AnyVersion(), + "start:two": AnyVersion(), + }, + }, + "channel_versions": { + "one": AnyVersion(), + "two": AnyVersion(), + "value": AnyVersion(), + "__start__": AnyVersion(), + "start:one": AnyVersion(), + "start:two": AnyVersion(), + }, + "channel_values": {"one": "one", "two": "two", "value": 6}, + }, + metadata={ + "parents": {}, + "step": 1, + "source": "loop", + "writes": {"one": {"value": 2}, "two": {"value": 3}}, + "thread_id": "1", + }, + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": checkpoints[1].config["configurable"][ + "checkpoint_id" + ], + } + }, + pending_writes=[], + ) + # the previous one we assert that pending writes contains both + # - original error + # - successful writes from resuming after preventing error + assert checkpoints[1] == CheckpointTuple( + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + checkpoint={ + "v": 1, + "id": AnyStr(), + "ts": AnyStr(), + "pending_sends": [], + "versions_seen": { + "__input__": {}, + "__start__": { + "__start__": AnyVersion(), + }, + }, + "channel_versions": { + "value": AnyVersion(), + "__start__": AnyVersion(), + "start:one": AnyVersion(), + "start:two": AnyVersion(), + }, + "channel_values": { + "value": 1, + "start:one": "__start__", + "start:two": "__start__", + }, + }, + metadata={ + "parents": {}, + "step": 0, + "source": "loop", + "writes": None, + "thread_id": "1", + }, + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": checkpoints[2].config["configurable"][ + "checkpoint_id" + ], + } + }, + pending_writes=UnsortedSequence( + (AnyStr(), "one", "one"), + (AnyStr(), "value", 2), + (AnyStr(), "__error__", 'ConnectionError("I\'m not good")'), + (AnyStr(), "two", "two"), + (AnyStr(), "value", 3), + ), + ) + assert checkpoints[2] 
== CheckpointTuple( + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + checkpoint={ + "v": 1, + "id": AnyStr(), + "ts": AnyStr(), + "pending_sends": [], + "versions_seen": {"__input__": {}}, + "channel_versions": { + "__start__": AnyVersion(), + }, + "channel_values": {"__start__": {"value": 1}}, + }, + metadata={ + "parents": {}, + "step": -1, + "source": "input", + "writes": {"__start__": {"value": 1}}, + "thread_id": "1", + }, + parent_config=None, + pending_writes=UnsortedSequence( + (AnyStr(), "value", 1), + (AnyStr(), "start:one", "__start__"), + (AnyStr(), "start:two", "__start__"), + ), + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_run_from_checkpoint_id_retains_previous_writes( + request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture +) -> None: + class MyState(TypedDict): + myval: Annotated[int, operator.add] + otherval: bool + + class Anode: + def __init__(self): + self.switch = False + + async def __call__(self, state: MyState): + self.switch = not self.switch + return {"myval": 2 if self.switch else 1, "otherval": self.switch} + + builder = StateGraph(MyState) + thenode = Anode() # Fun. 
+ builder.add_node("node_one", thenode) + builder.add_node("node_two", thenode) + builder.add_edge(START, "node_one") + + def _getedge(src: str): + swap = "node_one" if src == "node_two" else "node_two" + + def _edge(st: MyState) -> Literal["__end__", "node_one", "node_two"]: + if st["myval"] > 3: + return END + if st["otherval"]: + return swap + return src + + return _edge + + builder.add_conditional_edges("node_one", _getedge("node_one")) + builder.add_conditional_edges("node_two", _getedge("node_two")) + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + + thread_id = uuid.uuid4() + thread1 = {"configurable": {"thread_id": str(thread_id)}} + + result = await graph.ainvoke({"myval": 1}, thread1) + assert result["myval"] == 4 + history = [c async for c in graph.aget_state_history(thread1)] + + assert len(history) == 4 + assert history[-1].values == {"myval": 0} + assert history[0].values == {"myval": 4, "otherval": False} + + second_run_config = { + **thread1, + "configurable": { + **thread1["configurable"], + "checkpoint_id": history[1].config["configurable"]["checkpoint_id"], + }, + } + second_result = await graph.ainvoke(None, second_run_config) + assert second_result == {"myval": 5, "otherval": True} + + new_history = [ + c + async for c in graph.aget_state_history( + {"configurable": {"thread_id": str(thread_id), "checkpoint_ns": ""}} + ) + ] + + assert len(new_history) == len(history) + 1 + for original, new in zip(history, new_history[1:]): + assert original.values == new.values + assert original.next == new.next + assert original.metadata["step"] == new.metadata["step"] + + def _get_tasks(hist: list, start: int): + return [h.tasks for h in hist[start:]] + + assert _get_tasks(new_history, 1) == _get_tasks(history, 0) + + +async def test_cond_edge_after_send() -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + + async def __call__(self, 
state): + return [self.name] + + async def send_for_fun(state): + return [Send("2", state), Send("2", state)] + + async def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + + assert await graph.ainvoke(["0"]) == ["0", "1", "2", "2", "3"] + + +async def test_concurrent_emit_sends() -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + + async def __call__(self, state): + return ( + [self.name] + if isinstance(state, list) + else ["|".join((self.name, str(state)))] + ) + + async def send_for_fun(state): + return [Send("2", 1), Send("2", 2), "3.1"] + + async def send_for_profit(state): + return [Send("2", 3), Send("2", 4)] + + async def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("1.1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_node(Node("3.1")) + builder.add_edge(START, "1") + builder.add_edge(START, "1.1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("1.1", send_for_profit) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + assert await graph.ainvoke(["0"]) == ( + [ + "0", + "1", + "1.1", + "2|1", + "2|2", + "2|3", + "2|4", + "3", + "3.1", + ] + if FF_SEND_V2 + else [ + "0", + "1", + "1.1", + "3.1", + "2|1", + "2|2", + "2|3", + "2|4", + "3", + ] + ) + + +@pytest.mark.repeat(10) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_send_sequences(checkpointer_name: str) -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = 
name + + async def __call__(self, state): + update = ( + [self.name] + if isinstance(state, list) # or isinstance(state, Control) + else ["|".join((self.name, str(state)))] + ) + if isinstance(state, GraphCommand): + return replace(state, update=update) + else: + return update + + async def send_for_fun(state): + return [ + Send("2", GraphCommand(send=Send("2", 3))), + Send("2", GraphCommand(send=Send("2", 4))), + "3.1", + ] + + async def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_node(Node("3.1")) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + assert ( + await graph.ainvoke(["0"]) + == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='2', arg=4))", + "2|3", + "2|4", + "3", + "3.1", + ] + if FF_SEND_V2 + else [ + "0", + "1", + "3.1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='2', arg=4))", + "3", + "2|3", + "2|4", + "3", + ] + ) + + if not FF_SEND_V2: + return + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["3.1"]) + thread1 = {"configurable": {"thread_id": "1"}} + assert await graph.ainvoke(["0"], thread1) == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='2', arg=4))", + "2|3", + "2|4", + ] + assert await graph.ainvoke(None, thread1) == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='2', arg=4))", + "2|3", + "2|4", + "3", + "3.1", + ] + + +@pytest.mark.repeat(20) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_send_dedupe_on_resume(checkpointer_name: str) -> None: + if not FF_SEND_V2: + pytest.skip("Send 
deduplication is only available in Send V2") + + class InterruptOnce: + ticks: int = 0 + + def __call__(self, state): + self.ticks += 1 + if self.ticks == 1: + raise NodeInterrupt("Bahh") + return ["|".join(("flaky", str(state)))] + + class Node: + def __init__(self, name: str): + self.name = name + self.ticks = 0 + self.__name__ = name + + def __call__(self, state): + self.ticks += 1 + update = ( + [self.name] + if isinstance(state, list) + else ["|".join((self.name, str(state)))] + ) + if isinstance(state, GraphCommand): + return replace(state, update=update) + else: + return update + + def send_for_fun(state): + return [ + Send("2", GraphCommand(send=Send("2", 3))), + Send("2", GraphCommand(send=Send("flaky", 4))), + "3.1", + ] + + def route_to_three(state) -> Literal["3"]: + return "3" + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_node(Node("2")) + builder.add_node(Node("3")) + builder.add_node(Node("3.1")) + builder.add_node("flaky", InterruptOnce()) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_for_fun) + builder.add_conditional_edges("2", route_to_three) + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + thread1 = {"configurable": {"thread_id": "1"}} + assert await graph.ainvoke(["0"], thread1, debug=1) == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + ] + assert builder.nodes["2"].runnable.func.ticks == 3 + assert builder.nodes["flaky"].runnable.func.ticks == 1 + # resume execution + assert await graph.ainvoke(None, thread1, debug=1) == [ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + "flaky|4", + "3", + "3.1", + ] + # node "2" doesn't get called again, as we recover writes saved before + assert builder.nodes["2"].runnable.func.ticks == 3 + # node "flaky" gets called 
again, as it was interrupted + assert builder.nodes["flaky"].runnable.func.ticks == 2 + # check history + history = [c async for c in graph.aget_state_history(thread1)] + assert history == [ + StateSnapshot( + values=[ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + "flaky|4", + "3", + "3.1", + ], + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"3": ["3"], "3.1": ["3.1"]}, + "thread_id": "1", + "step": 2, + "parents": {}, + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=(), + ), + StateSnapshot( + values=[ + "0", + "1", + "2|Command(send=Send(node='2', arg=3))", + "2|Command(send=Send(node='flaky', arg=4))", + "2|3", + "flaky|4", + ], + next=("3", "3.1"), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": { + "1": ["1"], + "2": [ + ["2|Command(send=Send(node='2', arg=3))"], + ["2|Command(send=Send(node='flaky', arg=4))"], + ["2|3"], + ], + "flaky": ["flaky|4"], + }, + "thread_id": "1", + "step": 1, + "parents": {}, + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="3", + path=("__pregel_pull", "3"), + error=None, + interrupts=(), + state=None, + result=["3"], + ), + PregelTask( + id=AnyStr(), + name="3.1", + path=("__pregel_pull", "3.1"), + error=None, + interrupts=(), + state=None, + result=["3.1"], + ), + ), + ), + StateSnapshot( + values=["0"], + next=("1", "2", "2", "2", "flaky"), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + 
"writes": None, + "thread_id": "1", + "step": 0, + "parents": {}, + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="1", + path=("__pregel_pull", "1"), + error=None, + interrupts=(), + state=None, + result=["1"], + ), + PregelTask( + id=AnyStr(), + name="2", + path=( + "__pregel_push", + ("__pregel_pull", "1"), + 2, + AnyStr(), + ), + error=None, + interrupts=(), + state=None, + result=["2|Command(send=Send(node='2', arg=3))"], + ), + PregelTask( + id=AnyStr(), + name="2", + path=( + "__pregel_push", + ("__pregel_pull", "1"), + 3, + AnyStr(), + ), + error=None, + interrupts=(), + state=None, + result=["2|Command(send=Send(node='flaky', arg=4))"], + ), + PregelTask( + id=AnyStr(), + name="2", + path=( + "__pregel_push", + ( + "__pregel_push", + ("__pregel_pull", "1"), + 2, + AnyStr(), + ), + 2, + AnyStr(), + ), + error=None, + interrupts=(), + state=None, + result=["2|3"], + ), + PregelTask( + id=AnyStr(), + name="flaky", + path=( + "__pregel_push", + ( + "__pregel_push", + ("__pregel_pull", "1"), + 3, + AnyStr(), + ), + 2, + AnyStr(), + ), + error=None, + interrupts=(Interrupt(value="Bahh", when="during"),), + state=None, + result=["flaky|4"], + ), + ), + ), + StateSnapshot( + values=[], + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": ["0"]}, + "thread_id": "1", + "step": -1, + "parents": {}, + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=("__pregel_pull", "__start__"), + error=None, + interrupts=(), + state=None, + result=["0"], + ), + ), + ), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_send_react_interrupt(checkpointer_name: str) -> None: + from 
langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage + + ai_message = AIMessage( + "", + id="ai1", + tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())], + ) + + async def agent(state): + return {"messages": ai_message} + + def route(state): + if isinstance(state["messages"][-1], AIMessage): + return [ + Send(call["name"], call) for call in state["messages"][-1].tool_calls + ] + + foo_called = 0 + + async def foo(call: ToolCall): + nonlocal foo_called + foo_called += 1 + return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])} + + builder = StateGraph(MessagesState) + builder.add_node(agent) + builder.add_node(foo) + builder.add_edge(START, "agent") + builder.add_conditional_edges("agent", route) + graph = builder.compile() + + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # simple interrupt-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "1"}} + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + assert await graph.ainvoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + 
], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + # interrupt-update-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "2"}} + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + + if not FF_SEND_V2: + return + + # get state should show the pending task + state = await graph.aget_state(thread1) + assert state == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 0, + "source": "loop", + "writes": None, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # remove the tool call, clearing the pending task + await 
graph.aupdate_state( + thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])} + ) + + # tool call no longer in pending tasks + assert await graph.aget_state(thread1) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ), + ] + }, + next=(), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 1, + "source": "update", + "writes": { + "agent": { + "messages": _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ) + } + }, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=(), + ) + + # tool call not executed + assert await graph.ainvoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage(content="Bye now"), + ] + } + assert foo_called == 0 + + # interrupt-update-resume flow, creating new Send in update call + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "3"}} + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + + # get state should show the pending task + state = await graph.aget_state(thread1) + assert state == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + 
"checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 0, + "source": "loop", + "writes": None, + "parents": {}, + "thread_id": "3", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # replace the tool call, should clear previous send, create new one + await graph.aupdate_state( + thread1, + { + "messages": AIMessage( + "", + id=ai_message.id, + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ) + }, + ) + + # prev tool call no longer in pending tasks, new tool call is + assert await graph.aget_state(thread1) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 1, + "source": "update", + "writes": { + "agent": { + "messages": _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ) + } + }, + "parents": {}, + "thread_id": "3", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "3", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + 
PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", (), 0, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # prev tool call not executed, new tool call is + assert await graph.ainvoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + AIMessage( + "", + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [4, 5, 6]}, + "id": "tool1", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage(content="{'hi': [4, 5, 6]}", tool_call_id="tool1"), + ] + } + assert foo_called == 1 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_send_react_interrupt_control( + checkpointer_name: str, snapshot: SnapshotAssertion +) -> None: + from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage + + ai_message = AIMessage( + "", + id="ai1", + tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())], + ) + + async def agent(state) -> Command[Literal["foo"]]: + return GraphCommand( + update={"messages": ai_message}, + send=[Send(call["name"], call) for call in ai_message.tool_calls], + ) + + foo_called = 0 + + async def foo(call: ToolCall): + nonlocal foo_called + foo_called += 1 + return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])} + + builder = StateGraph(MessagesState) + builder.add_node(agent) + builder.add_node(foo) + builder.add_edge(START, "agent") + graph = builder.compile() + assert graph.get_graph().draw_mermaid() == snapshot + + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # 
simple interrupt-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "1"}} + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + assert await graph.ainvoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + _AnyIdToolMessage( + content="{'hi': [1, 2, 3]}", + tool_call_id=AnyStr(), + ), + ] + } + assert foo_called == 1 + + # interrupt-update-resume flow + foo_called = 0 + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"]) + thread1 = {"configurable": {"thread_id": "2"}} + assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + } + assert foo_called == 0 + + if not FF_SEND_V2: + return + + # get state should show the pending task + state = await graph.aget_state(thread1) + assert state == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ), + ] + }, + next=("foo",), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 0, + "source": "loop", + "writes": None, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + 
parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai1", + tool_calls=[ + { + "name": "foo", + "args": {"hi": [1, 2, 3]}, + "id": "", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + id=AnyStr(), + name="foo", + path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()), + error=None, + interrupts=(), + state=None, + result=None, + ), + ), + ) + + # remove the tool call, clearing the pending task + await graph.aupdate_state( + thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])} + ) + + # tool call no longer in pending tasks + assert await graph.aget_state(thread1) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ), + ] + }, + next=(), + config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "step": 1, + "source": "update", + "writes": { + "agent": { + "messages": _AnyIdAIMessage( + content="Bye now", + tool_calls=[], + ) + } + }, + "parents": {}, + "thread_id": "2", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "2", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=(), + ) + + # tool call not executed + assert await graph.ainvoke(None, thread1) == { + "messages": [ + _AnyIdHumanMessage(content="hello"), + _AnyIdAIMessage(content="Bye now"), + ] + } + assert foo_called == 0 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_max_concurrency(checkpointer_name: str) -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + 
self.currently = 0 + self.max_currently = 0 + + async def __call__(self, state): + self.currently += 1 + if self.currently > self.max_currently: + self.max_currently = self.currently + await asyncio.sleep(random.random() / 10) + self.currently -= 1 + return [state] + + def one(state): + return ["1"] + + def three(state): + return ["3"] + + async def send_to_many(state): + return [Send("2", idx) for idx in range(100)] + + async def route_to_three(state) -> Literal["3"]: + return "3" + + node2 = Node("2") + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node("1", one) + builder.add_node(node2) + builder.add_node("3", three) + builder.add_edge(START, "1") + builder.add_conditional_edges("1", send_to_many) + builder.add_conditional_edges("2", route_to_three) + graph = builder.compile() + + assert await graph.ainvoke(["0"]) == ["0", "1", *range(100), "3"] + assert node2.max_currently == 100 + assert node2.currently == 0 + node2.max_currently = 0 + + assert await graph.ainvoke(["0"], {"max_concurrency": 10}) == [ + "0", + "1", + *range(100), + "3", + ] + assert node2.max_currently == 10 + assert node2.currently == 0 + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["2"]) + thread1 = {"max_concurrency": 10, "configurable": {"thread_id": "1"}} + + assert await graph.ainvoke(["0"], thread1, debug=True) == ["0", "1"] + state = await graph.aget_state(thread1) + assert state.values == ["0", "1"] + assert await graph.ainvoke(None, thread1) == ["0", "1", *range(100), "3"] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_max_concurrency_control(checkpointer_name: str) -> None: + async def node1(state) -> GraphCommand[Literal["2"]]: + return GraphCommand(update=["1"], send=[Send("2", idx) for idx in range(100)]) + + node2_currently = 0 + node2_max_currently = 0 + + async def node2(state) -> GraphCommand[Literal["3"]]: + nonlocal 
node2_currently, node2_max_currently + node2_currently += 1 + if node2_currently > node2_max_currently: + node2_max_currently = node2_currently + await asyncio.sleep(0.1) + node2_currently -= 1 + + return GraphCommand(update=[state], goto="3") + + async def node3(state) -> Literal["3"]: + return ["3"] + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node("1", node1) + builder.add_node("2", node2) + builder.add_node("3", node3) + builder.add_edge(START, "1") + graph = builder.compile() + + assert ( + graph.get_graph().draw_mermaid() + == """%%{init: {'flowchart': {'curve': 'linear'}}}%% +graph TD; + __start__([

__start__

]):::first + 1(1) + 2(2) + 3([3]):::last + __start__ --> 1; + 1 -.-> 2; + 2 -.-> 3; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc +""" + ) + + assert await graph.ainvoke(["0"], debug=True) == ["0", "1", *range(100), "3"] + assert node2_max_currently == 100 + assert node2_currently == 0 + node2_max_currently = 0 + + assert await graph.ainvoke(["0"], {"max_concurrency": 10}) == [ + "0", + "1", + *range(100), + "3", + ] + assert node2_max_currently == 10 + assert node2_currently == 0 + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer, interrupt_before=["2"]) + thread1 = {"max_concurrency": 10, "configurable": {"thread_id": "1"}} + + assert await graph.ainvoke(["0"], thread1) == ["0", "1"] + assert await graph.ainvoke(None, thread1) == ["0", "1", *range(100), "3"] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_invoke_checkpoint_three( + mocker: MockerFixture, checkpointer_name: str +) -> None: + add_one = mocker.Mock(side_effect=lambda x: x["total"] + x["input"]) + + def raise_if_above_10(input: int) -> int: + if input > 10: + raise ValueError("Input is too large") + return input + + one = ( + Channel.subscribe_to(["input"]).join(["total"]) + | add_one + | Channel.write_to("output", "total") + | raise_if_above_10 + ) + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app = Pregel( + nodes={"one": one}, + channels={ + "total": BinaryOperatorAggregate(int, operator.add), + "input": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + checkpointer=checkpointer, + debug=True, + ) + + thread_1 = {"configurable": {"thread_id": "1"}} + # total starts out as 0, so output is 0+2=2 + assert await app.ainvoke(2, thread_1) == 2 + state = await app.aget_state(thread_1) + assert state is not None + assert state.values.get("total") == 2 
+ assert ( + state.config["configurable"]["checkpoint_id"] + == (await checkpointer.aget(thread_1))["id"] + ) + # total is now 2, so output is 2+3=5 + assert await app.ainvoke(3, thread_1) == 5 + state = await app.aget_state(thread_1) + assert state is not None + assert state.values.get("total") == 7 + assert ( + state.config["configurable"]["checkpoint_id"] + == (await checkpointer.aget(thread_1))["id"] + ) + # total is now 2+5=7, so output would be 7+4=11, but raises ValueError + with pytest.raises(ValueError): + await app.ainvoke(4, thread_1) + # checkpoint is not updated + state = await app.aget_state(thread_1) + assert state is not None + assert state.values.get("total") == 7 + assert state.next == ("one",) + """we checkpoint inputs and it failed on "one", so the next node is one""" + # we can recover from error by sending new inputs + assert await app.ainvoke(2, thread_1) == 9 + state = await app.aget_state(thread_1) + assert state is not None + assert state.values.get("total") == 16, "total is now 7+9=16" + assert state.next == () + + thread_2 = {"configurable": {"thread_id": "2"}} + # on a new thread, total starts out as 0, so output is 0+5=5 + assert await app.ainvoke(5, thread_2) == 5 + state = await app.aget_state({"configurable": {"thread_id": "1"}}) + assert state is not None + assert state.values.get("total") == 16 + assert state.next == () + state = await app.aget_state(thread_2) + assert state is not None + assert state.values.get("total") == 5 + assert state.next == () + + assert len([c async for c in app.aget_state_history(thread_1, limit=1)]) == 1 + # list all checkpoints for thread 1 + thread_1_history = [c async for c in app.aget_state_history(thread_1)] + # there are 7 checkpoints + assert len(thread_1_history) == 7 + assert Counter(c.metadata["source"] for c in thread_1_history) == { + "input": 4, + "loop": 3, + } + # sorted descending + assert ( + thread_1_history[0].config["configurable"]["checkpoint_id"] + > 
thread_1_history[1].config["configurable"]["checkpoint_id"] + ) + # cursor pagination + cursored = [ + c + async for c in app.aget_state_history( + thread_1, limit=1, before=thread_1_history[0].config + ) + ] + assert len(cursored) == 1 + assert cursored[0].config == thread_1_history[1].config + # the last checkpoint + assert thread_1_history[0].values["total"] == 16 + # the first "loop" checkpoint + assert thread_1_history[-2].values["total"] == 2 + # can get each checkpoint using aget with config + assert (await checkpointer.aget(thread_1_history[0].config))[ + "id" + ] == thread_1_history[0].config["configurable"]["checkpoint_id"] + assert (await checkpointer.aget(thread_1_history[1].config))[ + "id" + ] == thread_1_history[1].config["configurable"]["checkpoint_id"] + + thread_1_next_config = await app.aupdate_state(thread_1_history[1].config, 10) + # update creates a new checkpoint + assert ( + thread_1_next_config["configurable"]["checkpoint_id"] + > thread_1_history[0].config["configurable"]["checkpoint_id"] + ) + # 1 more checkpoint in history + assert len([c async for c in app.aget_state_history(thread_1)]) == 8 + assert Counter( + [c.metadata["source"] async for c in app.aget_state_history(thread_1)] + ) == { + "update": 1, + "input": 4, + "loop": 3, + } + # the latest checkpoint is the updated one + assert await app.aget_state(thread_1) == await app.aget_state( + thread_1_next_config + ) + + +async def test_invoke_two_processes_two_in_join_two_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + add_10_each = mocker.Mock(side_effect=lambda x: sorted(y + 10 for y in x)) + + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + chain_three = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + chain_four = ( + Channel.subscribe_to("inbox") | add_10_each | Channel.write_to("output") + ) + + app = Pregel( + nodes={ + "one": one, + "chain_three": chain_three, + "chain_four": 
chain_four, + }, + channels={ + "inbox": Topic(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + # Then invoke app + # We get a single array result as chain_four waits for all publishers to finish + # before operating on all elements published to topic_two as an array + for _ in range(100): + assert await app.ainvoke(2) == [13, 13] + + assert await asyncio.gather(*(app.ainvoke(2) for _ in range(100))) == [ + [13, 13] for _ in range(100) + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_invoke_join_then_call_other_pregel( + mocker: MockerFixture, checkpointer_name: str +) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + add_10_each = mocker.Mock(side_effect=lambda x: [y + 10 for y in x]) + + inner_app = Pregel( + nodes={ + "one": Channel.subscribe_to("input") | add_one | Channel.write_to("output") + }, + channels={ + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + one = ( + Channel.subscribe_to("input") + | add_10_each + | Channel.write_to("inbox_one").map() + ) + two = ( + Channel.subscribe_to("inbox_one") + | inner_app.map() + | sorted + | Channel.write_to("outbox_one") + ) + chain_three = Channel.subscribe_to("outbox_one") | sum | Channel.write_to("output") + + app = Pregel( + nodes={ + "one": one, + "two": two, + "chain_three": chain_three, + }, + channels={ + "inbox_one": Topic(int), + "outbox_one": LastValue(int), + "output": LastValue(int), + "input": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + # Then invoke pubsub + for _ in range(10): + assert await app.ainvoke([2, 3]) == 27 + + assert await asyncio.gather(*(app.ainvoke([2, 3]) for _ in range(10))) == [ + 27 for _ in range(10) + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # add checkpointer + app.checkpointer = checkpointer + # 
subgraph is called twice in the same node, through .map(), so raises + with pytest.raises(MultipleSubgraphsError): + await app.ainvoke([2, 3], {"configurable": {"thread_id": "1"}}) + + # set inner graph checkpointer NeverCheckpoint + inner_app.checkpointer = False + # subgraph still called twice, but checkpointing for inner graph is disabled + assert await app.ainvoke([2, 3], {"configurable": {"thread_id": "1"}}) == 27 + + +async def test_invoke_two_processes_one_in_two_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + + one = ( + Channel.subscribe_to("input") | add_one | Channel.write_to("output", "between") + ) + two = Channel.subscribe_to("between") | add_one | Channel.write_to("output") + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "between": LastValue(int), + "output": LastValue(int), + }, + stream_channels=["output", "between"], + input_channels="input", + output_channels="output", + ) + + # Then invoke pubsub + assert [c async for c in app.astream(2)] == [ + {"between": 3, "output": 3}, + {"between": 3, "output": 4}, + ] + + +async def test_invoke_two_processes_no_out(mocker: MockerFixture) -> None: + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("between") + two = Channel.subscribe_to("between") | add_one + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "between": LastValue(int), + "output": LastValue(int), + }, + input_channels="input", + output_channels="output", + ) + + # It finishes executing (once no more messages being published) + # but returns nothing, as nothing was published to "output" topic + assert await app.ainvoke(2) is None + + +async def test_channel_enter_exit_timing(mocker: MockerFixture) -> None: + setup_sync = mocker.Mock() + cleanup_sync = mocker.Mock() + setup_async = mocker.Mock() + cleanup_async = mocker.Mock() + + @contextmanager 
+ def an_int() -> Generator[int, None, None]: + setup_sync() + try: + yield 5 + finally: + cleanup_sync() + + @asynccontextmanager + async def an_int_async() -> AsyncGenerator[int, None]: + setup_async() + try: + yield 5 + finally: + cleanup_async() + + add_one = mocker.Mock(side_effect=lambda x: x + 1) + one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox") + two = ( + Channel.subscribe_to("inbox") + | RunnableLambda(add_one).abatch + | Channel.write_to("output").abatch + ) + + app = Pregel( + nodes={"one": one, "two": two}, + channels={ + "input": LastValue(int), + "output": LastValue(int), + "inbox": Topic(int), + "ctx": Context(an_int, an_int_async), + }, + input_channels="input", + output_channels=["inbox", "output"], + stream_channels=["inbox", "output"], + ) + + async def aenumerate(aiter: AsyncIterator[Any]) -> AsyncIterator[tuple[int, Any]]: + i = 0 + async for chunk in aiter: + yield i, chunk + i += 1 + + assert setup_sync.call_count == 0 + assert cleanup_sync.call_count == 0 + assert setup_async.call_count == 0 + assert cleanup_async.call_count == 0 + async for i, chunk in aenumerate(app.astream(2)): + assert setup_sync.call_count == 0, "Sync context manager should not be used" + assert cleanup_sync.call_count == 0, "Sync context manager should not be used" + assert setup_async.call_count == 1, "Expected setup to be called once" + if i == 0: + assert chunk == {"inbox": [3]} + elif i == 1: + assert chunk == {"output": 4} + else: + pytest.fail("Expected only two chunks") + assert setup_sync.call_count == 0 + assert cleanup_sync.call_count == 0 + assert setup_async.call_count == 1, "Expected setup to be called once" + assert cleanup_async.call_count == 1, "Expected cleanup to be called once" + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_conditional_graph(checkpointer_name: str) -> None: + from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.language_models.fake 
import FakeStreamingListLLM + from langchain_core.prompts import PromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_core.tools import tool + + # Assemble the tools + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + # Construct the agent + prompt = PromptTemplate.from_template("Hello!") + + llm = FakeStreamingListLLM( + responses=[ + "tool:search_api:query", + "tool:search_api:another", + "finish:answer", + ] + ) + + async def agent_parser(input: str) -> Union[AgentAction, AgentFinish]: + if input.startswith("finish"): + _, answer = input.split(":") + return AgentFinish(return_values={"answer": answer}, log=input) + else: + _, tool_name, tool_input = input.split(":") + return AgentAction(tool=tool_name, tool_input=tool_input, log=input) + + agent = RunnablePassthrough.assign(agent_outcome=prompt | llm | agent_parser) + + # Define tool execution logic + async def execute_tools(data: dict) -> dict: + data = data.copy() + agent_action: AgentAction = data.pop("agent_outcome") + observation = await {t.name: t for t in tools}[agent_action.tool].ainvoke( + agent_action.tool_input + ) + if data.get("intermediate_steps") is None: + data["intermediate_steps"] = [] + else: + data["intermediate_steps"] = data["intermediate_steps"].copy() + data["intermediate_steps"].append([agent_action, observation]) + return data + + # Define decision-making logic + async def should_continue(data: dict, config: RunnableConfig) -> str: + # Logic to decide whether to continue in the loop or exit + if isinstance(data["agent_outcome"], AgentFinish): + return "exit" + else: + return "continue" + + # Define a new graph + workflow = Graph() + + workflow.add_node("agent", agent) + workflow.add_node("tools", execute_tools) + + workflow.set_entry_point("agent") + + workflow.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + + 
workflow.add_edge("tools", "agent") + + app = workflow.compile() + + assert await app.ainvoke({"input": "what is weather in sf"}) == { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + + assert [c async for c in app.astream({"input": "what is weather in sf"})] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + 
log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + }, + ] + + patches = [c async for c in app.astream_log({"input": "what is weather in sf"})] + patch_paths = {op["path"] for log in patches for op in log.ops} + + # Check that agent (one of the nodes) has its output streamed to the logs + assert "/logs/agent/streamed_output/-" in patch_paths + assert "/logs/agent:2/streamed_output/-" in patch_paths + assert "/logs/agent:3/streamed_output/-" in patch_paths + # Check that agent (one of the nodes) has its final output set in the logs + assert "/logs/agent/final_output" in patch_paths + assert "/logs/agent:2/final_output" in patch_paths + assert "/logs/agent:3/final_output" in patch_paths + assert [ + p["value"] + for log in patches + for p in log.ops + if p["path"] == "/logs/agent/final_output" + or p["path"] == "/logs/agent:2/final_output" + or p["path"] == "/logs/agent:3/final_output" + ] == [ + { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + }, + { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + }, + { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + }, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # 
test state get/update methods with interrupt_after + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + {"input": "what is weather in sf"}, config + ) + ] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": { + "agent": { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + await app_w_interrupt.aupdate_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await 
app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + ] + + await app_w_interrupt.aupdate_state( + config, + { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + 
log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + }, + tasks=(), + next=(), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 4, + "writes": { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + # test state get/update methods with interrupt_before + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + llm.i = 0 + + assert [ + c + async for c in app_w_interrupt.astream( + {"input": "what is weather in sf"}, config + ) + ] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + 
"parents": {}, + "source": "loop", + "step": 0, + "writes": { + "agent": { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + await app_w_interrupt.aupdate_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "input": "what is weather in sf", + }, + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + 
"agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + ] + + await app_w_interrupt.aupdate_state( + config, + { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + }, + }, + tasks=(), + next=(), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 4, + "writes": { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + # test re-invoke to continue with interrupt_before + + app_w_interrupt = 
workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "3"}} + llm.i = 0 # reset the llm + + assert [ + c + async for c in app_w_interrupt.astream( + {"input": "what is weather in sf"}, config + ) + ] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + }, + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": { + "agent": { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + } + }, + "thread_id": "3", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "agent": { + "input": "what is weather in sf", + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + }, + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for 
query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + ] + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + { + "tools": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + } + }, + { + "agent": { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + }, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_conditional_graph_state( + mocker: MockerFixture, checkpointer_name: str +) -> None: + from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.language_models.fake import FakeStreamingListLLM + from langchain_core.prompts import PromptTemplate + from langchain_core.tools import tool + + setup = mocker.Mock() + teardown = mocker.Mock() + + @asynccontextmanager + async def assert_ctx_once() -> AsyncIterator[None]: + assert setup.call_count == 0 + assert teardown.call_count == 0 + try: + yield + finally: + assert setup.call_count == 
1 + assert teardown.call_count == 1 + setup.reset_mock() + teardown.reset_mock() + + class MyPydanticContextModel(BaseModel, arbitrary_types_allowed=True): + session: httpx.AsyncClient + something_else: str + + @asynccontextmanager + async def make_context( + config: RunnableConfig, + ) -> AsyncIterator[MyPydanticContextModel]: + assert isinstance(config, dict) + setup() + session = httpx.AsyncClient() + try: + yield MyPydanticContextModel(session=session, something_else="hello") + finally: + await session.aclose() + teardown() + + class AgentState(TypedDict): + input: Annotated[str, UntrackedValue] + agent_outcome: Optional[Union[AgentAction, AgentFinish]] + intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add] + context: Annotated[MyPydanticContextModel, Context(make_context)] + + # Assemble the tools + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + # Construct the agent + prompt = PromptTemplate.from_template("Hello!") + + llm = FakeStreamingListLLM( + responses=[ + "tool:search_api:query", + "tool:search_api:another", + "finish:answer", + ] + ) + + def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]: + if input.startswith("finish"): + _, answer = input.split(":") + return { + "agent_outcome": AgentFinish( + return_values={"answer": answer}, log=input + ) + } + else: + _, tool_name, tool_input = input.split(":") + return { + "agent_outcome": AgentAction( + tool=tool_name, tool_input=tool_input, log=input + ) + } + + agent = prompt | llm | agent_parser + + # Define tool execution logic + def execute_tools(data: AgentState) -> dict: + # check we have httpx session in AgentState + assert isinstance(data["context"], MyPydanticContextModel) + # execute the tool + agent_action: AgentAction = data.pop("agent_outcome") + observation = {t.name: t for t in tools}[agent_action.tool].invoke( + agent_action.tool_input + ) + return 
{"intermediate_steps": [[agent_action, observation]]} + + # Define decision-making logic + def should_continue(data: AgentState) -> str: + # check we have httpx session in AgentState + assert isinstance(data["context"], MyPydanticContextModel) + # Logic to decide whether to continue in the loop or exit + if isinstance(data["agent_outcome"], AgentFinish): + return "exit" + else: + return "continue" + + # Define a new graph + workflow = StateGraph(AgentState) + + workflow.add_node("agent", agent) + workflow.add_node("tools", execute_tools) + + workflow.set_entry_point("agent") + + workflow.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + + workflow.add_edge("tools", "agent") + + app = workflow.compile() + + async with assert_ctx_once(): + assert await app.ainvoke({"input": "what is weather in sf"}) == { + "input": "what is weather in sf", + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ], + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + "result for another", + ], + ], + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + + async with assert_ctx_once(): + assert [c async for c in app.astream({"input": "what is weather in sf"})] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + 
"result for another", + ], + ], + } + }, + { + "agent": { + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + } + }, + ] + + async with assert_ctx_once(): + patches = [c async for c in app.astream_log({"input": "what is weather in sf"})] + patch_paths = {op["path"] for log in patches for op in log.ops} + + # Check that agent (one of the nodes) has its output streamed to the logs + assert "/logs/agent/streamed_output/-" in patch_paths + # Check that agent (one of the nodes) has its final output set in the logs + assert "/logs/agent/final_output" in patch_paths + assert [ + p["value"] + for log in patches + for p in log.ops + if p["path"] == "/logs/agent/final_output" + or p["path"] == "/logs/agent:2/final_output" + or p["path"] == "/logs/agent:3/final_output" + ] == [ + { + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ) + }, + { + "agent_outcome": AgentAction( + tool="search_api", tool_input="another", log="tool:search_api:another" + ) + }, + { + "agent_outcome": AgentFinish( + return_values={"answer": "answer"}, log="finish:answer" + ), + }, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # test state get/update methods with interrupt_after + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + async with assert_ctx_once(): + assert [ + c + async for c in app_w_interrupt.astream( + {"input": "what is weather in sf"}, config + ) + ] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + {"__interrupt__": ()}, + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + "intermediate_steps": [], + }, + 
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + async with assert_ctx_once(): + await app_w_interrupt.aupdate_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + async with assert_ctx_once(): + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "agent_outcome": AgentAction( + 
tool="search_api", + tool_input="another", + log="tool:search_api:another", + ), + } + }, + {"__interrupt__": ()}, + ] + + async with assert_ctx_once(): + await app_w_interrupt.aupdate_state( + config, + { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + }, + tasks=(), + next=(), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": { + "agent": { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + # test state get/update methods with interrupt_before + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + llm.i = 0 # reset the llm + + assert [ + c + async for c in app_w_interrupt.astream( + {"input": "what is weather in sf"}, config + ) + ] == [ + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + {"__interrupt__": ()}, + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", tool_input="query", log="tool:search_api:query" + ), + "intermediate_steps": 
[], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:query", + ), + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + await app_w_interrupt.aupdate_state( + config, + { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "intermediate_steps": [], + }, + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ) + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "tools": { + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + } + }, + { + "agent": { + "agent_outcome": AgentAction( + tool="search_api", + tool_input="another", + 
log="tool:search_api:another", + ), + } + }, + {"__interrupt__": ()}, + ] + + await app_w_interrupt.aupdate_state( + config, + { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + }, + ) + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ), + "intermediate_steps": [ + [ + AgentAction( + tool="search_api", + tool_input="query", + log="tool:search_api:a different query", + ), + "result for query", + ] + ], + }, + tasks=(), + next=(), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": { + "agent": { + "agent_outcome": AgentFinish( + return_values={"answer": "a really nice answer"}, + log="finish:a really nice answer", + ) + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + +async def test_conditional_entrypoint_graph() -> None: + async def left(data: str) -> str: + return data + "->left" + + async def right(data: str) -> str: + return data + "->right" + + def should_start(data: str) -> str: + # Logic to decide where to start + if len(data) > 10: + return "go-right" + else: + return "go-left" + + # Define a new graph + workflow = Graph() + + workflow.add_node("left", left) + workflow.add_node("right", right) + + workflow.set_conditional_entry_point( + should_start, {"go-left": "left", "go-right": "right"} + ) + + workflow.add_conditional_edges("left", lambda data: END) + workflow.add_edge("right", END) + + app = workflow.compile() + + assert await app.ainvoke("what is weather in sf") == "what is weather in sf->right" + + assert [c async for c in app.astream("what 
is weather in sf")] == [ + {"right": "what is weather in sf->right"}, + ] + + +async def test_conditional_entrypoint_graph_state() -> None: + class AgentState(TypedDict, total=False): + input: str + output: str + steps: Annotated[list[str], operator.add] + + async def left(data: AgentState) -> AgentState: + return {"output": data["input"] + "->left"} + + async def right(data: AgentState) -> AgentState: + return {"output": data["input"] + "->right"} + + def should_start(data: AgentState) -> str: + assert data["steps"] == [], "Expected input to be read from the state" + # Logic to decide where to start + if len(data["input"]) > 10: + return "go-right" + else: + return "go-left" + + # Define a new graph + workflow = StateGraph(AgentState) + + workflow.add_node("left", left) + workflow.add_node("right", right) + + workflow.set_conditional_entry_point( + should_start, {"go-left": "left", "go-right": "right"} + ) + + workflow.add_conditional_edges("left", lambda data: END) + workflow.add_edge("right", END) + + app = workflow.compile() + + assert await app.ainvoke({"input": "what is weather in sf"}) == { + "input": "what is weather in sf", + "output": "what is weather in sf->right", + "steps": [], + } + + assert [c async for c in app.astream({"input": "what is weather in sf"})] == [ + {"right": {"output": "what is weather in sf->right"}}, + ] + + +async def test_prebuilt_tool_chat() -> None: + from langchain_core.messages import AIMessage, HumanMessage + from langchain_core.tools import tool + + model = FakeChatModel( + messages=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one"}, + }, + ], + ), + AIMessage(content="answer"), + ] + ) + + @tool() + def search_api(query: 
str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + app = create_tool_calling_executor(model, tools) + + assert await app.ainvoke( + {"messages": [HumanMessage(content="what is weather in sf")]} + ) == { + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + id=AnyStr(), + ), + _AnyIdAIMessage(content="answer"), + ] + } + + assert [ + c + async for c in app.astream( + {"messages": [HumanMessage(content="what is weather in sf")]}, + stream_mode="messages", + ) + ] == [ + ( + _AnyIdAIMessageChunk( + content="", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "query"}, + "id": "tool_call123", + "type": "tool_call", + } + ], + tool_call_chunks=[ + { + "name": "search_api", + "args": '{"query": "query"}', + "id": "tool_call123", + "index": None, + "type": "tool_call_chunk", + } + ], + ), + { + "langgraph_step": 1, + "langgraph_node": "agent", + "langgraph_triggers": ["start:agent"], + "langgraph_path": ("__pregel_pull", "agent"), + "langgraph_checkpoint_ns": AnyStr("agent:"), + "checkpoint_ns": AnyStr("agent:"), + "ls_provider": "fakechatmodel", + "ls_model_type": "chat", + }, + ), + ( + _AnyIdToolMessage( + content="result for query", + name="search_api", + 
tool_call_id="tool_call123", + ), + { + "langgraph_step": 2, + "langgraph_node": "tools", + "langgraph_triggers": ["branch:agent:should_continue:tools"], + "langgraph_path": ("__pregel_pull", "tools"), + "langgraph_checkpoint_ns": AnyStr("tools:"), + }, + ), + ( + _AnyIdAIMessageChunk( + content="", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "another"}, + "id": "tool_call234", + "type": "tool_call", + }, + { + "name": "search_api", + "args": {"query": "a third one"}, + "id": "tool_call567", + "type": "tool_call", + }, + ], + tool_call_chunks=[ + { + "name": "search_api", + "args": '{"query": "another"}', + "id": "tool_call234", + "index": None, + "type": "tool_call_chunk", + }, + { + "name": "search_api", + "args": '{"query": "a third one"}', + "id": "tool_call567", + "index": None, + "type": "tool_call_chunk", + }, + ], + ), + { + "langgraph_step": 3, + "langgraph_node": "agent", + "langgraph_triggers": ["tools"], + "langgraph_path": ("__pregel_pull", "agent"), + "langgraph_checkpoint_ns": AnyStr("agent:"), + "checkpoint_ns": AnyStr("agent:"), + "ls_provider": "fakechatmodel", + "ls_model_type": "chat", + }, + ), + ( + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + { + "langgraph_step": 4, + "langgraph_node": "tools", + "langgraph_triggers": ["branch:agent:should_continue:tools"], + "langgraph_path": ("__pregel_pull", "tools"), + "langgraph_checkpoint_ns": AnyStr("tools:"), + }, + ), + ( + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + { + "langgraph_step": 4, + "langgraph_node": "tools", + "langgraph_triggers": ["branch:agent:should_continue:tools"], + "langgraph_path": ("__pregel_pull", "tools"), + "langgraph_checkpoint_ns": AnyStr("tools:"), + }, + ), + ( + _AnyIdAIMessageChunk( + content="answer", + ), + { + "langgraph_step": 5, + "langgraph_node": "agent", + "langgraph_triggers": ["tools"], + "langgraph_path": 
("__pregel_pull", "agent"), + "langgraph_checkpoint_ns": AnyStr("agent:"), + "checkpoint_ns": AnyStr("agent:"), + "ls_provider": "fakechatmodel", + "ls_model_type": "chat", + }, + ), + ] + + assert [ + c + async for c in app.astream( + {"messages": [HumanMessage(content="what is weather in sf")]} + ) + ] == [ + { + "agent": { + "messages": [ + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + ] + } + }, + { + "tools": { + "messages": [ + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + } + }, + { + "agent": { + "messages": [ + _AnyIdAIMessage( + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another"}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one"}, + }, + ], + ) + ] + } + }, + { + "tools": { + "messages": [ + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + ] + } + }, + {"agent": {"messages": [_AnyIdAIMessage(content="answer")]}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_state_graph_packets(checkpointer_name: str) -> None: + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + ToolMessage, + ) + from langchain_core.tools import tool + + class AgentState(TypedDict): + messages: Annotated[list[BaseMessage], add_messages] + session: Annotated[httpx.AsyncClient, Context(httpx.AsyncClient)] + + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + tools_by_name = {t.name: t for t 
in tools} + + model = FakeMessagesListChatModel( + responses=[ + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + AIMessage(id="ai3", content="answer"), + ] + ) + + # Define decision-making logic + def should_continue(data: AgentState) -> str: + assert isinstance(data["session"], httpx.AsyncClient) + # Logic to decide whether to continue in the loop or exit + if tool_calls := data["messages"][-1].tool_calls: + return [Send("tools", tool_call) for tool_call in tool_calls] + else: + return END + + async def tools_node(input: ToolCall, config: RunnableConfig) -> AgentState: + await asyncio.sleep(input["args"].get("idx", 0) / 10) + output = await tools_by_name[input["name"]].ainvoke(input["args"], config) + return { + "messages": ToolMessage( + content=output, name=input["name"], tool_call_id=input["id"] + ) + } + + # Define a new graph + workflow = StateGraph(AgentState) + + # Define the two nodes we will cycle between + workflow.add_node("agent", {"messages": RunnablePick("messages") | model}) + workflow.add_node("tools", tools_node) + + # Set the entrypoint as `agent` + # This means that this node is the first one called + workflow.set_entry_point("agent") + + # We now add a conditional edge + workflow.add_conditional_edges("agent", should_continue) + + # We now add a normal edge from `tools` to `agent`. + # This means that after `tools` is called, `agent` node is called next. + workflow.add_edge("tools", "agent") + + # Finally, we compile it! 
+ # This compiles it into a LangChain Runnable, + # meaning you can use it as you would any other runnable + app = workflow.compile() + + assert await app.ainvoke( + {"messages": HumanMessage(content="what is weather in sf")} + ) == { + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", + ), + _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + AIMessage(content="answer", id="ai3"), + ] + } + + assert [ + c + async for c in app.astream( + {"messages": [HumanMessage(content="what is weather in sf")]} + ) + ] == [ + { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + }, + }, + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + } + }, + { + "agent": { + "messages": AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ) + } + }, + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call234", 
+ ) + }, + }, + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a third one", + name="search_api", + tool_call_id="tool_call567", + ), + }, + }, + {"agent": {"messages": AIMessage(content="answer", id="ai3")}}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # interrupt after agent + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + {"messages": HumanMessage(content="what is weather in sf")}, config + ) + ] == [ + { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + } + }, + {"__interrupt__": ()}, + ] + + if not FF_SEND_V2: + return + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai1", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "query"}, + "id": "tool_call123", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + ), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + 
][-1].config, + ) + + # modify ai message + last_message = (await app_w_interrupt.aget_state(config)).values["messages"][-1] + last_message.tool_calls[0]["args"]["query"] = "a different query" + await app_w_interrupt.aupdate_state(config, {"messages": last_message}) + + # message was replaced instead of appended + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + ] + }, + tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),), + next=("tools",), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ) + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + } + }, + { + "agent": { + "messages": AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ) + }, + }, + {"__interrupt__": ()}, + ] + + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + 
_AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + "", + id="ai2", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "another", "idx": 0}, + "id": "tool_call234", + "type": "tool_call", + }, + { + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + "id": "tool_call567", + "type": "tool_call", + }, + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr()) + ), + ), + next=("tools", "tools"), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + }, + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + await app_w_interrupt.aupdate_state( + config, + {"messages": AIMessage(content="answer", id="ai2")}, + ) + + # replaces message even if object identity is different, as long as id is the same + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await 
app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ] + }, + tasks=(), + next=(), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 3, + "writes": { + "agent": { + "messages": AIMessage(content="answer", id="ai2"), + } + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + # interrupt before tools + + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_before=["tools"], + ) + config = {"configurable": {"thread_id": "2"}} + model.i = 0 + + assert [ + c + async for c in app_w_interrupt.astream( + {"messages": HumanMessage(content="what is weather in sf")}, config + ) + ] == [ + { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ) + } + }, + {"__interrupt__": ()}, + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai1", + tool_calls=[ + { + "name": "search_api", + "args": 
{"query": "query"}, + "id": "tool_call123", + "type": "tool_call", + } + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + ), + next=("tools",), + config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config, + created_at=( + await app_w_interrupt.checkpointer.aget_tuple(config) + ).checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + # modify ai message + last_message = (await app_w_interrupt.aget_state(config)).values["messages"][-1] + last_message.tool_calls[0]["args"]["query"] = "a different query" + await app_w_interrupt.aupdate_state(config, {"messages": last_message}) + + # message was replaced instead of appended + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + ] + }, + tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),), + next=("tools",), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": { + "agent": { + "messages": AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ) + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", 
+ tool_call_id="tool_call123", + ) + } + }, + { + "agent": { + "messages": AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ) + }, + }, + {"__interrupt__": ()}, + ] + + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + id="ai2", + content="", + tool_calls=[ + { + "id": "tool_call234", + "name": "search_api", + "args": {"query": "another", "idx": 0}, + }, + { + "id": "tool_call567", + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + }, + ], + ), + ] + }, + tasks=( + PregelTask( + id=AnyStr(), + name="agent", + path=("__pregel_pull", "agent"), + error=None, + interrupts=(), + state=None, + result={ + "messages": AIMessage( + content="", + additional_kwargs={}, + response_metadata={}, + id="ai2", + tool_calls=[ + { + "name": "search_api", + "args": {"query": "another", "idx": 0}, + "id": "tool_call234", + "type": "tool_call", + }, + { + "name": "search_api", + "args": {"query": "a third one", "idx": 1}, + "id": "tool_call567", + "type": "tool_call", + }, + ], + ) + }, + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr()) + ), + PregelTask( + AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr()) + ), + ), + next=("tools", "tools"), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 
2, + "writes": { + "tools": { + "messages": _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + }, + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + await app_w_interrupt.aupdate_state( + config, + {"messages": AIMessage(content="answer", id="ai2")}, + ) + + # replaces message even if object identity is different, as long as id is the same + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + id="ai1", + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + }, + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ] + }, + tasks=(), + next=(), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 3, + "writes": { + "agent": { + "messages": AIMessage(content="answer", id="ai2"), + } + }, + "thread_id": "2", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_message_graph(checkpointer_name: str) -> None: + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import AIMessage, HumanMessage + from langchain_core.tools import tool + + class FakeFuntionChatModel(FakeMessagesListChatModel): + def bind_functions(self, functions: list): + return self + + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + 
tools = [search_api] + + model = FakeFuntionChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + AIMessage(content="answer", id="ai3"), + ] + ) + + # Define the function that determines whether to continue or not + def should_continue(messages): + last_message = messages[-1] + # If there is no function call, then we finish + if not last_message.tool_calls: + return "end" + # Otherwise if there is, we continue + else: + return "continue" + + # Define a new graph + workflow = MessageGraph() + + # Define the two nodes we will cycle between + workflow.add_node("agent", model) + workflow.add_node("tools", ToolNode(tools)) + + # Set the entrypoint as `agent` + # This means that this node is the first one called + workflow.set_entry_point("agent") + + # We now add a conditional edge + workflow.add_conditional_edges( + # First, we define the start node. We use `agent`. + # This means these are the edges taken after the `agent` node is called. + "agent", + # Next, we pass in the function that will determine which node is called next. + should_continue, + # Finally we pass in a mapping. + # The keys are strings, and the values are other nodes. + # END is a special node marking that the graph should finish. + # What will happen is we will call `should_continue`, and then the output of that + # will be matched against the keys in this mapping. + # Based on which one it matches, that node will then be called. + { + # If `tools`, then we call the tool node. + "continue": "tools", + # Otherwise we finish. + "end": END, + }, + ) + + # We now add a normal edge from `tools` to `agent`. + # This means that after `tools` is called, `agent` node is called next. 
+ workflow.add_edge("tools", "agent") + + # Finally, we compile it! + # This compiles it into a LangChain Runnable, + # meaning you can use it as you would any other runnable + app = workflow.compile() + + assert await app.ainvoke(HumanMessage(content="what is weather in sf")) == [ + _AnyIdHumanMessage( + content="what is weather in sf", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", # respects ids passed in + ), + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call456", + ), + AIMessage(content="answer", id="ai3"), + ] + + assert [ + c async for c in app.astream([HumanMessage(content="what is weather in sf")]) + ] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + { + "tools": [ + _AnyIdToolMessage( + content="result for query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + { + "tools": [ + _AnyIdToolMessage( + content="result for another", + name="search_api", + tool_call_id="tool_call456", + ) + ] + }, + {"agent": AIMessage(content="answer", id="ai3")}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["agent"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + 
HumanMessage(content="what is weather in sf"), config + ) + ] == [ + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + {"__interrupt__": ()}, + ] + + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + # modify ai message + last_message = (await app_w_interrupt.aget_state(config)).values[-1] + last_message.tool_calls[0]["args"] = {"query": "a different query"} + await app_w_interrupt.aupdate_state(config, last_message) + + # message was replaced instead of appended + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 2, + "writes": { + "agent": AIMessage( + 
content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + id="ai1", + ) + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + { + "tools": [ + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ) + ] + }, + { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + {"__interrupt__": ()}, + ] + + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ), + ], + tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),), + next=("tools",), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "loop", + "step": 4, + "writes": { + "agent": AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call456", + "name": "search_api", + "args": {"query": "another"}, + } + ], + id="ai2", + ) + }, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + await app_w_interrupt.aupdate_state( + config, + AIMessage(content="answer", id="ai2"), + ) + + # replaces message even if object identity is 
different, as long as id is the same + tup = await app_w_interrupt.checkpointer.aget_tuple(config) + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values=[ + _AnyIdHumanMessage(content="what is weather in sf"), + AIMessage( + content="", + id="ai1", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "a different query"}, + } + ], + ), + _AnyIdToolMessage( + content="result for a different query", + name="search_api", + tool_call_id="tool_call123", + ), + AIMessage(content="answer", id="ai2"), + ], + tasks=(), + next=(), + config=tup.config, + created_at=tup.checkpoint["ts"], + metadata={ + "parents": {}, + "source": "update", + "step": 5, + "writes": {"agent": AIMessage(content="answer", id="ai2")}, + "thread_id": "1", + }, + parent_config=[ + c async for c in app_w_interrupt.checkpointer.alist(config, limit=2) + ][-1].config, + ) + + +async def test_in_one_fan_out_out_one_graph_state() -> None: + def sorted_add(x: list[str], y: list[str]) -> list[str]: + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], operator.add] + + async def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + async def retriever_one(data: State) -> State: + await asyncio.sleep(0.1) + return {"docs": ["doc1", "doc2"]} + + async def retriever_two(data: State) -> State: + return {"docs": ["doc3", "doc4"]} + + async def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge("retriever_one", "qa") + 
workflow.add_edge("retriever_two", "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert await app.ainvoke({"query": "what is weather in sf"}) == { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [c async for c in app.astream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + assert [ + c + async for c in app.astream( + {"query": "what is weather in sf"}, stream_mode="values" + ) + ] == [ + {"query": "what is weather in sf", "docs": []}, + {"query": "query: what is weather in sf", "docs": []}, + { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + }, + ] + + assert [ + c + async for c in app.astream( + {"query": "what is weather in sf"}, + stream_mode=["values", "updates", "debug"], + ) + ] == [ + ("values", {"query": "what is weather in sf", "docs": []}), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "rewrite_query", + "input": {"query": "what is weather in sf", "docs": []}, + "triggers": ["start:rewrite_query"], + }, + }, + ), + ("updates", {"rewrite_query": {"query": "query: what is weather in sf"}}), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "rewrite_query", + "result": [("query", "query: what is weather in sf")], + "error": None, + "interrupts": [], + }, + }, + ), + ("values", {"query": "query: what is weather in sf", "docs": []}), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": 
AnyStr(), + "name": "retriever_one", + "input": {"query": "query: what is weather in sf", "docs": []}, + "triggers": ["rewrite_query"], + }, + }, + ), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_two", + "input": {"query": "query: what is weather in sf", "docs": []}, + "triggers": ["rewrite_query"], + }, + }, + ), + ( + "updates", + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + ), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_two", + "result": [("docs", ["doc3", "doc4"])], + "error": None, + "interrupts": [], + }, + }, + ), + ( + "updates", + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + ), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "retriever_one", + "result": [("docs", ["doc1", "doc2"])], + "error": None, + "interrupts": [], + }, + }, + ), + ( + "values", + { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + ), + ( + "debug", + { + "type": "task", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "qa", + "input": { + "query": "query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + "triggers": ["retriever_one", "retriever_two"], + }, + }, + ), + ("updates", {"qa": {"answer": "doc1,doc2,doc3,doc4"}}), + ( + "debug", + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "qa", + "result": [("answer", "doc1,doc2,doc3,doc4")], + "error": None, + "interrupts": [], + }, + }, + ), + ( + "values", + { + "query": "query: what is weather in sf", + "answer": "doc1,doc2,doc3,doc4", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + ), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_start_branch_then(checkpointer_name: 
str) -> None: + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + shared: Annotated[dict[str, dict[str, Any]], SharedValue.on("assistant_id")] + other: Annotated[dict[str, dict[str, Any]], SharedValue.on("assistant_id")] + + def assert_shared_value(data: State, config: RunnableConfig) -> State: + assert "shared" in data + if thread_id := config["configurable"].get("thread_id"): + if thread_id == "1": + # this is the first thread, so should not see a value + assert data["shared"] == {} + return {"shared": {"1": {"hello": "world"}}, "other": {"2": {1: 2}}} + elif thread_id == "2": + # this should get value saved by thread 1 + assert data["shared"] == {"1": {"hello": "world"}} + elif thread_id == "3": + # this is a different assistant, so should not see previous value + assert data["shared"] == {} + return {} + + def tool_two_slow(data: State, config: RunnableConfig) -> State: + return {"my_key": " slow", **assert_shared_value(data, config)} + + def tool_two_fast(data: State, config: RunnableConfig) -> State: + return {"my_key": " fast", **assert_shared_value(data, config)} + + tool_two_graph = StateGraph(State) + tool_two_graph.add_node("tool_two_slow", tool_two_slow) + tool_two_graph.add_node("tool_two_fast", tool_two_fast) + tool_two_graph.set_conditional_entry_point( + lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", then=END + ) + tool_two = tool_two_graph.compile() + + assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}) == { + "my_key": "value slow", + "market": "DE", + } + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == { + "my_key": "value fast", + "market": "US", + } + + async with awith_checkpointer(checkpointer_name) as checkpointer: + tool_two = tool_two_graph.compile( + store=InMemoryStore(), + checkpointer=checkpointer, + interrupt_before=["tool_two_fast", "tool_two_slow"], + ) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + await 
tool_two.ainvoke({"my_key": "value", "market": "DE"}) + + thread1 = {"configurable": {"thread_id": "1", "assistant_id": "a"}} + # stop when about to enter node + assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}, thread1) == { + "my_key": "value", + "market": "DE", + } + assert [c.metadata async for c in tool_two.checkpointer.alist(thread1)] == [ + { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "a", + "thread_id": "1", + }, + { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value", "market": "DE"}}, + "assistant_id": "a", + "thread_id": "1", + }, + ] + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=(await tool_two.checkpointer.aget_tuple(thread1)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "a", + "thread_id": "1", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread1, debug=1) == { + "my_key": "value slow", + "market": "DE", + } + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value slow", "market": "DE"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread1)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"tool_two_slow": {"my_key": " slow"}}, + "assistant_id": "a", + "thread_id": "1", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + + thread2 = {"configurable": {"thread_id": 
"2", "assistant_id": "a"}} + # stop when about to enter node + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread2) == { + "my_key": "value", + "market": "US", + } + assert await tool_two.aget_state(thread2) == StateSnapshot( + values={"my_key": "value", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=(await tool_two.checkpointer.aget_tuple(thread2)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "a", + "thread_id": "2", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread2, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread2, debug=1) == { + "my_key": "value fast", + "market": "US", + } + assert await tool_two.aget_state(thread2) == StateSnapshot( + values={"my_key": "value fast", "market": "US"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread2)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"tool_two_fast": {"my_key": " fast"}}, + "assistant_id": "a", + "thread_id": "2", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread2, limit=2) + ][-1].config, + ) + + thread3 = {"configurable": {"thread_id": "3", "assistant_id": "b"}} + # stop when about to enter node + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread3) == { + "my_key": "value", + "market": "US", + } + assert await tool_two.aget_state(thread3) == StateSnapshot( + values={"my_key": "value", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=(await tool_two.checkpointer.aget_tuple(thread3)).config, + 
created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "assistant_id": "b", + "thread_id": "3", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread3, limit=2) + ][-1].config, + ) + # update state + await tool_two.aupdate_state(thread3, {"my_key": "key"}) # appends to my_key + assert await tool_two.aget_state(thread3) == StateSnapshot( + values={"my_key": "valuekey", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=(await tool_two.checkpointer.aget_tuple(thread3)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "update", + "step": 1, + "writes": {START: {"my_key": "key"}}, + "assistant_id": "b", + "thread_id": "3", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread3, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread3, debug=1) == { + "my_key": "valuekey fast", + "market": "US", + } + assert await tool_two.aget_state(thread3) == StateSnapshot( + values={"my_key": "valuekey fast", "market": "US"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread3)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 2, + "writes": {"tool_two_fast": {"my_key": " fast"}}, + "assistant_id": "b", + "thread_id": "3", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread3, limit=2) + ][-1].config, + ) + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_branch_then(checkpointer_name: str) -> None: + class State(TypedDict): + my_key: Annotated[str, operator.add] + market: str + + tool_two_graph = StateGraph(State) 
+ tool_two_graph.set_entry_point("prepare") + tool_two_graph.set_finish_point("finish") + tool_two_graph.add_conditional_edges( + source="prepare", + path=lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", + then="finish", + ) + tool_two_graph.add_node("prepare", lambda s: {"my_key": " prepared"}) + tool_two_graph.add_node("tool_two_slow", lambda s: {"my_key": " slow"}) + tool_two_graph.add_node("tool_two_fast", lambda s: {"my_key": " fast"}) + tool_two_graph.add_node("finish", lambda s: {"my_key": " finished"}) + tool_two = tool_two_graph.compile() + + assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}, debug=1) == { + "my_key": "value prepared slow finished", + "market": "DE", + } + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == { + "my_key": "value prepared fast finished", + "market": "US", + } + + async with awith_checkpointer(checkpointer_name) as checkpointer: + # test stream_mode=debug + tool_two = tool_two_graph.compile(checkpointer=checkpointer) + thread10 = {"configurable": {"thread_id": "10"}} + assert [ + c + async for c in tool_two.astream( + {"my_key": "value", "market": "DE"}, thread10, stream_mode="debug" + ) + ] == [ + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": -1, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": {"my_key": ""}, + "metadata": { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value", "market": "DE"}}, + "thread_id": "10", + }, + "parent_config": None, + "next": ["__start__"], + "tasks": [ + { + "id": AnyStr(), + "name": "__start__", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + 
"callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 0, + "writes": None, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["prepare"], + "tasks": [ + { + "id": AnyStr(), + "name": "prepare", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "prepare", + "input": {"my_key": "value", "market": "DE"}, + "triggers": ["start:prepare"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "prepare", + "result": [("my_key", " prepared")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value prepared", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["tool_two_slow"], + "tasks": [ + { + "id": AnyStr(), + "name": "tool_two_slow", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + 
"step": 2, + "payload": { + "id": AnyStr(), + "name": "tool_two_slow", + "input": {"my_key": "value prepared", "market": "DE"}, + "triggers": ["branch:prepare:condition:tool_two_slow"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "id": AnyStr(), + "name": "tool_two_slow", + "result": [("my_key", " slow")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 2, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value prepared slow", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 2, + "writes": {"tool_two_slow": {"my_key": " slow"}}, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["finish"], + "tasks": [ + { + "id": AnyStr(), + "name": "finish", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "finish", + "input": {"my_key": "value prepared slow", "market": "DE"}, + "triggers": ["branch:prepare:condition::then"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "id": AnyStr(), + "name": "finish", + "result": [("my_key", " finished")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 3, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, 
+ "values": { + "my_key": "value prepared slow finished", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "10", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "10"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "10", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": [], + "tasks": [], + }, + }, + ] + + tool_two = tool_two_graph.compile( + checkpointer=checkpointer, + interrupt_before=["tool_two_fast", "tool_two_slow"], + ) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + await tool_two.ainvoke({"my_key": "value", "market": "DE"}) + + thread1 = {"configurable": {"thread_id": "11"}} + # stop when about to enter node + assert [ + c + async for c in tool_two.astream( + {"my_key": "value", "market": "DE"}, thread1, stream_mode="debug" + ) + ] == [ + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": -1, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "11"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "11", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": {"my_key": ""}, + "metadata": { + "parents": {}, + "source": "input", + "step": -1, + "writes": {"__start__": {"my_key": "value", "market": "DE"}}, + "thread_id": "11", + }, + "parent_config": None, + "next": ["__start__"], + "tasks": [ + { + "id": AnyStr(), + "name": "__start__", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 0, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "11"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "11", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value", + "market": "DE", + }, + "metadata": { + "parents": {}, + 
"source": "loop", + "step": 0, + "writes": None, + "thread_id": "11", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "11"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "11", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["prepare"], + "tasks": [ + { + "id": AnyStr(), + "name": "prepare", + "interrupts": (), + "state": None, + } + ], + }, + }, + { + "type": "task", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "prepare", + "input": {"my_key": "value", "market": "DE"}, + "triggers": ["start:prepare"], + }, + }, + { + "type": "task_result", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "id": AnyStr(), + "name": "prepare", + "result": [("my_key", " prepared")], + "error": None, + "interrupts": [], + }, + }, + { + "type": "checkpoint", + "timestamp": AnyStr(), + "step": 1, + "payload": { + "config": { + "tags": [], + "metadata": {"thread_id": "11"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "11", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "values": { + "my_key": "value prepared", + "market": "DE", + }, + "metadata": { + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "11", + }, + "parent_config": { + "tags": [], + "metadata": {"thread_id": "11"}, + "callbacks": None, + "recursion_limit": 25, + "configurable": { + "thread_id": "11", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + }, + }, + "next": ["tool_two_slow"], + "tasks": [ + { + "id": AnyStr(), + "name": "tool_two_slow", + "interrupts": (), + "state": None, + } + ], + }, + }, + ] + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value prepared", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=(await tool_two.checkpointer.aget_tuple(thread1)).config, 
+ created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "11", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread1, debug=1) == { + "my_key": "value prepared slow finished", + "market": "DE", + } + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value prepared slow finished", "market": "DE"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread1)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "11", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + + thread2 = {"configurable": {"thread_id": "12"}} + # stop when about to enter node + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread2) == { + "my_key": "value prepared", + "market": "US", + } + assert await tool_two.aget_state(thread2) == StateSnapshot( + values={"my_key": "value prepared", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=(await tool_two.checkpointer.aget_tuple(thread2)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "12", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread2, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread2, debug=1) == { + "my_key": "value 
prepared fast finished", + "market": "US", + } + assert await tool_two.aget_state(thread2) == StateSnapshot( + values={"my_key": "value prepared fast finished", "market": "US"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread2)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "12", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread2, limit=2) + ][-1].config, + ) + + tool_two = tool_two_graph.compile( + checkpointer=checkpointer, interrupt_after=["prepare"] + ) + + # missing thread_id + with pytest.raises(ValueError, match="thread_id"): + await tool_two.ainvoke({"my_key": "value", "market": "DE"}) + + thread1 = {"configurable": {"thread_id": "21"}} + # stop when about to enter node + assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}, thread1) == { + "my_key": "value prepared", + "market": "DE", + } + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value prepared", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=(await tool_two.checkpointer.aget_tuple(thread1)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "21", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread1, debug=1) == { + "my_key": "value prepared slow finished", + "market": "DE", + } + assert await tool_two.aget_state(thread1) == StateSnapshot( + values={"my_key": "value prepared slow finished", "market": "DE"}, + tasks=(), + next=(), + 
config=(await tool_two.checkpointer.aget_tuple(thread1)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "21", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread1, limit=2) + ][-1].config, + ) + + thread2 = {"configurable": {"thread_id": "22"}} + # stop when about to enter node + assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread2) == { + "my_key": "value prepared", + "market": "US", + } + assert await tool_two.aget_state(thread2) == StateSnapshot( + values={"my_key": "value prepared", "market": "US"}, + tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),), + next=("tool_two_fast",), + config=(await tool_two.checkpointer.aget_tuple(thread2)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "22", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread2, limit=2) + ][-1].config, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread2, debug=1) == { + "my_key": "value prepared fast finished", + "market": "US", + } + assert await tool_two.aget_state(thread2) == StateSnapshot( + values={"my_key": "value prepared fast finished", "market": "US"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread2)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "22", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread2, limit=2) + ][-1].config, + ) + + thread3 = {"configurable": {"thread_id": "23"}} + # 
update an empty thread before first run + uconfig = await tool_two.aupdate_state( + thread3, {"my_key": "key", "market": "DE"} + ) + # check current state + assert await tool_two.aget_state(thread3) == StateSnapshot( + values={"my_key": "key", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "prepare", (PULL, "prepare")),), + next=("prepare",), + config=uconfig, + created_at=AnyStr(), + metadata={ + "parents": {}, + "source": "update", + "step": 0, + "writes": {START: {"my_key": "key", "market": "DE"}}, + "thread_id": "23", + }, + parent_config=None, + ) + # run from this point + assert await tool_two.ainvoke(None, thread3) == { + "my_key": "key prepared", + "market": "DE", + } + # get state after first node + assert await tool_two.aget_state(thread3) == StateSnapshot( + values={"my_key": "key prepared", "market": "DE"}, + tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),), + next=("tool_two_slow",), + config=(await tool_two.checkpointer.aget_tuple(thread3)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 1, + "writes": {"prepare": {"my_key": " prepared"}}, + "thread_id": "23", + }, + parent_config=uconfig, + ) + # resume, for same result as above + assert await tool_two.ainvoke(None, thread3, debug=1) == { + "my_key": "key prepared slow finished", + "market": "DE", + } + assert await tool_two.aget_state(thread3) == StateSnapshot( + values={"my_key": "key prepared slow finished", "market": "DE"}, + tasks=(), + next=(), + config=(await tool_two.checkpointer.aget_tuple(thread3)).config, + created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[ + "ts" + ], + metadata={ + "parents": {}, + "source": "loop", + "step": 3, + "writes": {"finish": {"my_key": " finished"}}, + "thread_id": "23", + }, + parent_config=[ + c async for c in tool_two.checkpointer.alist(thread3, limit=2) + ][-1].config, + ) + + 
+@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_in_one_fan_out_state_graph_waiting_edge(checkpointer_name: str) -> None: + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + async def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + async def analyzer_one(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + async def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + async def retriever_two(data: State) -> State: + await asyncio.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + async def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert await app.ainvoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [c async for c in app.astream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, 
+ {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + {"query": "what is weather in sf"}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_in_one_fan_out_state_graph_waiting_edge_via_branch( + snapshot: SnapshotAssertion, checkpointer_name: str +) -> None: + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + async def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + async def analyzer_one(data: State) -> State: + return {"query": f'analyzed: {data["query"]}'} + + async def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + async def retriever_two(data: State) -> State: + await asyncio.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + async def qa(data: State) -> State: + return {"answer": ",".join(data["docs"])} + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + 
workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_conditional_edges( + "rewrite_query", lambda _: "retriever_two", {"retriever_two": "retriever_two"} + ) + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + assert await app.ainvoke({"query": "what is weather in sf"}, debug=True) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [c async for c in app.astream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + {"query": "what is weather in sf"}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def 
test_in_one_fan_out_state_graph_waiting_edge_custom_state_class( + snapshot: SnapshotAssertion, mocker: MockerFixture, checkpointer_name: str +) -> None: + from pydantic.v1 import BaseModel, ValidationError + + setup = mocker.Mock() + teardown = mocker.Mock() + + @asynccontextmanager + async def assert_ctx_once() -> AsyncIterator[None]: + assert setup.call_count == 0 + assert teardown.call_count == 0 + try: + yield + finally: + assert setup.call_count == 1 + assert teardown.call_count == 1 + setup.reset_mock() + teardown.reset_mock() + + @asynccontextmanager + async def make_httpx_client() -> AsyncIterator[httpx.AsyncClient]: + setup() + async with httpx.AsyncClient() as client: + try: + yield client + finally: + teardown() + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(BaseModel): + class Config: + arbitrary_types_allowed = True + + query: str + answer: Optional[str] = None + docs: Annotated[list[str], sorted_add] + client: Annotated[httpx.AsyncClient, Context(make_httpx_client)] + + class Input(BaseModel): + query: str + + class Output(BaseModel): + answer: str + docs: list[str] + + class StateUpdate(BaseModel): + query: Optional[str] = None + answer: Optional[str] = None + docs: Optional[list[str]] = None + + async def rewrite_query(data: State) -> State: + return {"query": f"query: {data.query}"} + + async def analyzer_one(data: State) -> State: + return StateUpdate(query=f"analyzed: {data.query}") + + async def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + async def retriever_two(data: State) -> State: + await asyncio.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + async def qa(data: State) -> State: + return {"answer": ",".join(data.docs)} + + async def decider(data: State) -> str: + assert isinstance(data, State) + return "retriever_two" 
+ + workflow = StateGraph(State, input=Input, output=Output) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_conditional_edges( + "rewrite_query", decider, {"retriever_two": "retriever_two"} + ) + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + async with assert_ctx_once(): + with pytest.raises(ValidationError): + await app.ainvoke({"query": {}}) + + async with assert_ctx_once(): + assert await app.ainvoke({"query": "what is weather in sf"}) == { + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + async with assert_ctx_once(): + assert [c async for c in app.astream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + async with assert_ctx_once(): + assert [ + c + async for c in app_w_interrupt.astream( + {"query": "what is weather in sf"}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": 
()}, + ] + + async with assert_ctx_once(): + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + assert await app_w_interrupt.aget_state(config) == StateSnapshot( + values={ + "query": "analyzed: query: what is weather in sf", + "answer": "doc1,doc2,doc3,doc4", + "docs": ["doc1", "doc2", "doc3", "doc4"], + }, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + "step": 4, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + + async with assert_ctx_once(): + assert await app_w_interrupt.aupdate_state( + config, {"docs": ["doc5"]}, as_node="rewrite_query" + ) == { + "configurable": { + "thread_id": "1", + "checkpoint_id": AnyStr(), + "checkpoint_ns": "", + } + } + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2( + snapshot: SnapshotAssertion, checkpointer_name: str +) -> None: + from pydantic import BaseModel, ValidationError + + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class InnerObject(BaseModel): + yo: int + + class State(BaseModel): + query: str + inner: InnerObject + answer: Optional[str] = None + docs: Annotated[list[str], sorted_add] + + class StateUpdate(BaseModel): + query: Optional[str] = None + answer: Optional[str] = None + docs: Optional[list[str]] = None + + async def rewrite_query(data: State) -> State: + return {"query": f"query: {data.query}"} + + async def analyzer_one(data: State) -> State: 
+ return StateUpdate(query=f"analyzed: {data.query}") + + async def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + async def retriever_two(data: State) -> State: + await asyncio.sleep(0.1) + return {"docs": ["doc3", "doc4"]} + + async def qa(data: State) -> State: + return {"answer": ",".join(data.docs)} + + async def decider(data: State) -> str: + assert isinstance(data, State) + return "retriever_two" + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_conditional_edges( + "rewrite_query", decider, {"retriever_two": "retriever_two"} + ) + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + app = workflow.compile() + + if SHOULD_CHECK_SNAPSHOTS: + assert app.get_graph().draw_mermaid(with_styles=False) == snapshot + assert app.get_input_schema().model_json_schema() == snapshot + assert app.get_output_schema().model_json_schema() == snapshot + + with pytest.raises(ValidationError): + await app.ainvoke({"query": {}}) + + assert await app.ainvoke( + {"query": "what is weather in sf", "inner": {"yo": 1}} + ) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + "inner": {"yo": 1}, + } + + assert [ + c + async for c in app.astream( + {"query": "what is weather in sf", "inner": {"yo": 1}} + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + 
{"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + {"query": "what is weather in sf", "inner": {"yo": 1}}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"__interrupt__": ()}, + ] + + assert [c async for c in app_w_interrupt.astream(None, config)] == [ + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + assert await app_w_interrupt.aupdate_state( + config, {"docs": ["doc5"]}, as_node="rewrite_query" + ) == { + "configurable": { + "thread_id": "1", + "checkpoint_id": AnyStr(), + "checkpoint_ns": "", + } + } + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_in_one_fan_out_state_graph_waiting_edge_plus_regular( + checkpointer_name: str, +) -> None: + def sorted_add( + x: list[str], y: Union[list[str], list[tuple[str, str]]] + ) -> list[str]: + if isinstance(y[0], tuple): + for rem, _ in y: + x.remove(rem) + y = [t[1] for t in y] + return sorted(operator.add(x, y)) + + class State(TypedDict, total=False): + query: str + answer: str + docs: Annotated[list[str], sorted_add] + + async def rewrite_query(data: State) -> State: + return {"query": f'query: {data["query"]}'} + + async def analyzer_one(data: State) -> State: + await asyncio.sleep(0.1) + return {"query": f'analyzed: {data["query"]}'} + + async def retriever_one(data: State) -> State: + return {"docs": ["doc1", "doc2"]} + + async def retriever_two(data: State) -> State: + await asyncio.sleep(0.2) + return {"docs": ["doc3", "doc4"]} + + async def qa(data: State) -> State: + return 
{"answer": ",".join(data["docs"])} + + workflow = StateGraph(State) + + workflow.add_node("rewrite_query", rewrite_query) + workflow.add_node("analyzer_one", analyzer_one) + workflow.add_node("retriever_one", retriever_one) + workflow.add_node("retriever_two", retriever_two) + workflow.add_node("qa", qa) + + workflow.set_entry_point("rewrite_query") + workflow.add_edge("rewrite_query", "analyzer_one") + workflow.add_edge("analyzer_one", "retriever_one") + workflow.add_edge("rewrite_query", "retriever_two") + workflow.add_edge(["retriever_one", "retriever_two"], "qa") + workflow.set_finish_point("qa") + + # silly edge, to make sure having been triggered before doesn't break + # semantics of named barrier (== waiting edges) + workflow.add_edge("rewrite_query", "qa") + + app = workflow.compile() + + assert await app.ainvoke({"query": "what is weather in sf"}) == { + "query": "analyzed: query: what is weather in sf", + "docs": ["doc1", "doc2", "doc3", "doc4"], + "answer": "doc1,doc2,doc3,doc4", + } + + assert [c async for c in app.astream({"query": "what is weather in sf"})] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"qa": {"answer": ""}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", "doc2"]}}, + {"qa": {"answer": "doc1,doc2,doc3,doc4"}}, + ] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app_w_interrupt = workflow.compile( + checkpointer=checkpointer, + interrupt_after=["retriever_one"], + ) + config = {"configurable": {"thread_id": "1"}} + + assert [ + c + async for c in app_w_interrupt.astream( + {"query": "what is weather in sf"}, config + ) + ] == [ + {"rewrite_query": {"query": "query: what is weather in sf"}}, + {"qa": {"answer": ""}}, + {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}}, + {"retriever_two": {"docs": ["doc3", "doc4"]}}, + {"retriever_one": {"docs": ["doc1", 
async def test_in_one_fan_out_state_graph_waiting_edge_multiple() -> None:
    """Fan-out graph whose conditional edge routes back through the whole
    pipeline once before finishing, so every doc ends up retrieved twice."""

    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # (remove, add) tuples first delete the named entries, then act
        # as plain additions; a list of plain strings is simply appended.
        if isinstance(y[0], tuple):
            for removed, _ in y:
                x.remove(removed)
            y = [added for _, added in y]
        return sorted(operator.add(x, y))

    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]

    async def rewrite_query(state: State) -> State:
        return {"query": f'query: {state["query"]}'}

    async def analyzer_one(state: State) -> State:
        return {"query": f'analyzed: {state["query"]}'}

    async def retriever_one(state: State) -> State:
        return {"docs": ["doc1", "doc2"]}

    async def retriever_two(state: State) -> State:
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}

    async def qa(state: State) -> State:
        return {"answer": ",".join(state["docs"])}

    async def decider(state: State) -> None:
        return None

    def decider_cond(state: State) -> str:
        # Loop back until the query has been analyzed twice.
        return "qa" if state["query"].count("analyzed") > 1 else "rewrite_query"

    workflow = StateGraph(State)
    for name, fn in (
        ("rewrite_query", rewrite_query),
        ("analyzer_one", analyzer_one),
        ("retriever_one", retriever_one),
        ("retriever_two", retriever_two),
        ("decider", decider),
        ("qa", qa),
    ):
        workflow.add_node(name, fn)

    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    workflow.add_edge(["retriever_one", "retriever_two"], "decider")
    workflow.add_conditional_edges("decider", decider_cond)
    workflow.set_finish_point("qa")

    app = workflow.compile()

    assert await app.ainvoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: analyzed: query: what is weather in sf",
        "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4",
        "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"],
    }

    expected_updates = [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}},
        {
            "analyzer_one": {
                "query": "analyzed: query: analyzed: query: what is weather in sf"
            }
        },
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}},
    ]
    assert [
        c async for c in app.astream({"query": "what is weather in sf"})
    ] == expected_updates
async def test_nested_graph(snapshot: SnapshotAssertion) -> None:
    """Compose a compiled subgraph as a node of an outer graph and verify:

    - ``ainvoke`` returns the fully-reduced state,
    - ``astream`` in updates mode emits one chunk per node,
    - ``astream`` in values mode emits each intermediate state,
    - ``astream_events`` (v2) emits exactly one terminal ``on_chain_end``
      for the pinned run id, in both values and updates mode,
    - the graph composes into an LCEL chain (``app | RunnablePassthrough()``)
      with equivalent invoke/stream/events behavior.

    NOTE(review): the ``snapshot`` fixture does not appear to be used in
    this body — confirm against the sync twin of this test.
    """

    def never_called_fn(state: Any):
        # Guard value carried through state; the graph must never invoke it.
        assert 0, "This function should never be called"

    never_called = RunnableLambda(never_called_fn)

    class InnerState(TypedDict):
        my_key: str
        my_other_key: str

    def up(state: InnerState):
        return {"my_key": state["my_key"] + " there", "my_other_key": state["my_key"]}

    # Single-node inner graph: entry and finish are both "up".
    inner = StateGraph(InnerState)
    inner.add_node("up", up)
    inner.set_entry_point("up")
    inner.set_finish_point("up")

    class State(TypedDict):
        my_key: str
        never_called: Any

    async def side(state: State):
        return {"my_key": state["my_key"] + " and back again"}

    # Outer graph: compiled inner graph as a node, then "side".
    graph = StateGraph(State)
    graph.add_node("inner", inner.compile())
    graph.add_node("side", side)
    graph.set_entry_point("inner")
    graph.add_edge("inner", "side")
    graph.set_finish_point("side")

    app = graph.compile()

    # The RunnableLambda rides along in state untouched.
    assert await app.ainvoke({"my_key": "my value", "never_called": never_called}) == {
        "my_key": "my value there and back again",
        "never_called": never_called,
    }
    # Updates mode: one chunk per outer node.
    assert [
        chunk
        async for chunk in app.astream(
            {"my_key": "my value", "never_called": never_called}
        )
    ] == [
        {"inner": {"my_key": "my value there"}},
        {"side": {"my_key": "my value there and back again"}},
    ]
    # Values mode: input state plus one full state per step.
    assert [
        chunk
        async for chunk in app.astream(
            {"my_key": "my value", "never_called": never_called}, stream_mode="values"
        )
    ] == [
        {"my_key": "my value", "never_called": never_called},
        {"my_key": "my value there", "never_called": never_called},
        {"my_key": "my value there and back again", "never_called": never_called},
    ]
    # Exactly one terminal on_chain_end for the pinned run id (values mode).
    times_called = 0
    async for event in app.astream_events(
        {"my_key": "my value", "never_called": never_called},
        version="v2",
        config={"run_id": UUID(int=0)},
        stream_mode="values",
    ):
        if event["event"] == "on_chain_end" and event["run_id"] == str(UUID(int=0)):
            times_called += 1
            assert event["data"] == {
                "output": {
                    "my_key": "my value there and back again",
                    "never_called": never_called,
                }
            }
    assert times_called == 1
    # Same check without stream_mode override.
    times_called = 0
    async for event in app.astream_events(
        {"my_key": "my value", "never_called": never_called},
        version="v2",
        config={"run_id": UUID(int=0)},
    ):
        if event["event"] == "on_chain_end" and event["run_id"] == str(UUID(int=0)):
            times_called += 1
            assert event["data"] == {
                "output": {
                    "my_key": "my value there and back again",
                    "never_called": never_called,
                }
            }
    assert times_called == 1

    # Compose into an LCEL chain; behavior must be preserved.
    chain = app | RunnablePassthrough()

    assert await chain.ainvoke(
        {"my_key": "my value", "never_called": never_called}
    ) == {
        "my_key": "my value there and back again",
        "never_called": never_called,
    }
    assert [
        chunk
        async for chunk in chain.astream(
            {"my_key": "my value", "never_called": never_called}
        )
    ] == [
        {"inner": {"my_key": "my value there"}},
        {"side": {"my_key": "my value there and back again"}},
    ]
    # As a chain, the terminal output is the list of streamed updates.
    times_called = 0
    async for event in chain.astream_events(
        {"my_key": "my value", "never_called": never_called},
        version="v2",
        config={"run_id": UUID(int=0)},
    ):
        if event["event"] == "on_chain_end" and event["run_id"] == str(UUID(int=0)):
            times_called += 1
            assert event["data"] == {
                "output": [
                    {"inner": {"my_key": "my value there"}},
                    {"side": {"my_key": "my value there and back again"}},
                ]
            }
    assert times_called == 1
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_stream_buffering_single_node(checkpointer_name: str) -> None:
    """Custom-stream writes must be delivered as they happen, not buffered
    until the node returns: the chunk written before the sleep has to arrive
    roughly 0.2s before the one written after it."""

    class State(TypedDict):
        my_key: Annotated[str, operator.add]

    async def emitter(state: State, writer: StreamWriter):
        # Two custom-stream writes separated by a deliberate delay.
        writer("Before sleep")
        await asyncio.sleep(0.2)
        writer("After sleep")
        return {"my_key": "got here"}

    builder = StateGraph(State)
    builder.add_node("node", emitter)
    builder.add_edge(START, "node")
    builder.add_edge("node", END)

    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)

        config = {"configurable": {"thread_id": "2"}}
        t0 = perf_counter()
        timed_chunks: list[tuple[float, Any]] = [
            (round(perf_counter() - t0, 1), chunk)
            async for chunk in graph.astream(
                {"my_key": ""}, config, stream_mode="custom"
            )
        ]

        assert timed_chunks == [
            (FloatBetween(0.0, 0.1), "Before sleep"),
            (FloatBetween(0.2, 0.3), "After sleep"),
        ]
""}, config, debug=True) == { + "my_key": " and parallel", + } + + assert await app.ainvoke(None, config, debug=True) == { + "my_key": "got here and there and parallel and back again", + } + + # below combo of assertions is asserting two things + # - outer_1 finishes before inner interrupts (because we see its output in stream, which only happens after node finishes) + # - the writes of outer are persisted in 1st call and used in 2nd call, ie outer isn't called again (because we dont see outer_1 output again in 2nd stream) + # test stream updates w/ nested interrupt + config = {"configurable": {"thread_id": "2"}} + assert [ + c async for c in app.astream({"my_key": ""}, config, subgraphs=True) + ] == [ + # we got to parallel node first + ((), {"outer_1": {"my_key": " and parallel"}}), + ( + (AnyStr("inner:"),), + {"inner_1": {"my_key": "got here", "my_other_key": ""}}, + ), + ((), {"__interrupt__": ()}), + ] + assert [c async for c in app.astream(None, config)] == [ + {"outer_1": {"my_key": " and parallel"}, "__metadata__": {"cached": True}}, + {"inner": {"my_key": "got here and there"}}, + {"outer_2": {"my_key": " and back again"}}, + ] + + # test stream values w/ nested interrupt + config = {"configurable": {"thread_id": "3"}} + assert [ + c async for c in app.astream({"my_key": ""}, config, stream_mode="values") + ] == [ + {"my_key": ""}, + {"my_key": " and parallel"}, + ] + assert [c async for c in app.astream(None, config, stream_mode="values")] == [ + {"my_key": ""}, + {"my_key": "got here and there and parallel"}, + {"my_key": "got here and there and parallel and back again"}, + ] + + # # test interrupts BEFORE the parallel node + app = graph.compile(checkpointer=checkpointer, interrupt_before=["outer_1"]) + config = {"configurable": {"thread_id": "4"}} + assert [ + c async for c in app.astream({"my_key": ""}, config, stream_mode="values") + ] == [ + {"my_key": ""}, + ] + # while we're waiting for the node w/ interrupt inside to finish + assert [c async for 
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_doubly_nested_graph_interrupts(checkpointer_name: str) -> None:
    """Parent -> child -> grandchild nesting where the grandchild interrupts
    before its second node.  Verifies invoke/resume, updates-mode streaming
    (including the CONFIG_KEY_NODE_FINISHED callback ordering), and
    values-mode streaming across the interrupt.
    """

    class State(TypedDict):
        my_key: str

    class ChildState(TypedDict):
        my_key: str

    class GrandChildState(TypedDict):
        my_key: str

    # These two are nodes of the GrandChildState graph (annotation improved
    # from ChildState; both TypedDicts share the same single key).
    async def grandchild_1(state: GrandChildState):
        return {"my_key": state["my_key"] + " here"}

    async def grandchild_2(state: GrandChildState):
        return {
            "my_key": state["my_key"] + " and there",
        }

    grandchild = StateGraph(GrandChildState)
    grandchild.add_node("grandchild_1", grandchild_1)
    grandchild.add_node("grandchild_2", grandchild_2)
    grandchild.add_edge("grandchild_1", "grandchild_2")
    grandchild.set_entry_point("grandchild_1")
    grandchild.set_finish_point("grandchild_2")

    # Child wraps the grandchild, which interrupts before grandchild_2.
    child = StateGraph(ChildState)
    child.add_node(
        "child_1",
        grandchild.compile(interrupt_before=["grandchild_2"]),
    )
    child.set_entry_point("child_1")
    child.set_finish_point("child_1")

    async def parent_1(state: State):
        return {"my_key": "hi " + state["my_key"]}

    async def parent_2(state: State):
        return {"my_key": state["my_key"] + " and back again"}

    # Parent: parent_1 -> child (doubly nested) -> parent_2.
    graph = StateGraph(State)
    graph.add_node("parent_1", parent_1)
    graph.add_node("child", child.compile())
    graph.add_node("parent_2", parent_2)
    graph.set_entry_point("parent_1")
    graph.add_edge("parent_1", "child")
    graph.add_edge("child", "parent_2")
    graph.set_finish_point("parent_2")

    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = graph.compile(checkpointer=checkpointer)

        # test invoke w/ nested interrupt
        config = {"configurable": {"thread_id": "1"}}
        assert await app.ainvoke({"my_key": "my value"}, config, debug=True) == {
            "my_key": "hi my value",
        }

        # Resume from the saved checkpoint; grandchild completes.
        assert await app.ainvoke(None, config, debug=True) == {
            "my_key": "hi my value here and there and back again",
        }

        # test stream updates w/ nested interrupt; CONFIG_KEY_NODE_FINISHED
        # records node completion order across all nesting levels.
        nodes: list[str] = []
        config = {
            "configurable": {"thread_id": "2", CONFIG_KEY_NODE_FINISHED: nodes.append}
        }
        assert [c async for c in app.astream({"my_key": "my value"}, config)] == [
            {"parent_1": {"my_key": "hi my value"}},
            {"__interrupt__": ()},
        ]
        # Only the nodes that ran before the interrupt have finished.
        assert nodes == ["parent_1", "grandchild_1"]
        assert [c async for c in app.astream(None, config)] == [
            {"child": {"my_key": "hi my value here and there"}},
            {"parent_2": {"my_key": "hi my value here and there and back again"}},
        ]
        # After resuming, the remaining nodes finish inner-to-outer.
        assert nodes == [
            "parent_1",
            "grandchild_1",
            "grandchild_2",
            "child_1",
            "child",
            "parent_2",
        ]

        # test stream values w/ nested interrupt
        config = {"configurable": {"thread_id": "3"}}
        assert [
            c
            async for c in app.astream(
                {"my_key": "my value"}, config, stream_mode="values"
            )
        ] == [
            {"my_key": "my value"},
            {"my_key": "hi my value"},
        ]
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": "hi my value"},
            {"my_key": "hi my value here and there"},
            {"my_key": "hi my value here and there and back again"},
        ]
} + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # now, get_state with subgraphs state + assert await app.aget_state(config, subgraphs=True) == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state=StateSnapshot( + values={ + "my_key": "hi my value here", + "my_other_key": "hi my value", + }, + tasks=( + PregelTask( + AnyStr(), + "inner_2", + (PULL, "inner_2"), + ), + ), + next=("inner_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "parents": { + "": AnyStr(), + }, + "source": "loop", + "writes": { + "inner_1": { + "my_key": "hi my value here", + "my_other_key": "hi my value", + } + }, + "step": 1, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + ), + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, 
+ ) + # get_state_history returns outer graph checkpoints + history = [c async for c in app.aget_state_history(config)] + assert history == [ + StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + } + }, + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "my value"}, + tasks=( + PregelTask( + AnyStr(), + "outer_1", + (PULL, "outer_1"), + result={"my_key": "hi my value"}, + ), + ), + next=("outer_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={}, + tasks=( + PregelTask( + AnyStr(), + "__start__", + (PULL, "__start__"), + result={"my_key": "my value"}, + ), + ), + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"my_key": "my value"}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + # get_state_history for a subgraph returns its checkpoints + child_history = [ + c async for c in app.aget_state_history(history[0].tasks[0].state) + ] + assert child_history == [ + 
StateSnapshot( + values={"my_key": "hi my value here", "my_other_key": "hi my value"}, + next=("inner_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": { + "inner_1": { + "my_key": "hi my value here", + "my_other_key": "hi my value", + } + }, + "step": 1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=(PregelTask(AnyStr(), "inner_2", (PULL, "inner_2")),), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + next=("inner_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=( + PregelTask( + AnyStr(), + "inner_1", + (PULL, "inner_1"), + result={ + "my_key": "hi my value here", + 
"my_other_key": "hi my value", + }, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "hi my value"}}, + "step": -1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("inner:"), + "langgraph_node": "inner", + "langgraph_path": [PULL, "inner"], + "langgraph_step": 2, + "langgraph_triggers": ["outer_1"], + "langgraph_checkpoint_ns": AnyStr("inner:"), + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + AnyStr(), + "__start__", + (PULL, "__start__"), + result={"my_key": "hi my value"}, + ), + ), + ), + ] + + # resume + await app.ainvoke(None, config, debug=True) + # test state w/ nested subgraph state (after resuming from interrupt) + assert await app.aget_state(config) == StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "outer_2": {"my_key": "hi my value here and there and back again"} + }, + "step": 3, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # test full history at the end + actual_history = [c async for c in app.aget_state_history(config)] + expected_history = [ + StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "outer_2": { + "my_key": "hi my 
value here and there and back again" + } + }, + "step": 3, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "hi my value here and there"}, + tasks=( + PregelTask( + AnyStr(), + "outer_2", + (PULL, "outer_2"), + result={"my_key": "hi my value here and there and back again"}, + ), + ), + next=("outer_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"inner": {"my_key": "hi my value here and there"}}, + "step": 2, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "inner", + (PULL, "inner"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + } + }, + result={"my_key": "hi my value here and there"}, + ), + ), + next=("inner",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"outer_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "my value"}, + tasks=( + PregelTask( + AnyStr(), + "outer_1", + (PULL, "outer_1"), + result={"my_key": "hi my value"}, + ), + ), + next=("outer_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + 
parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={}, + tasks=( + PregelTask( + AnyStr(), + "__start__", + (PULL, "__start__"), + result={"my_key": "my value"}, + ), + ), + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"my_key": "my value"}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + assert actual_history == expected_history + # test looking up parent state by checkpoint ID + for actual_snapshot, expected_snapshot in zip(actual_history, expected_history): + assert await app.aget_state(actual_snapshot.config) == expected_snapshot + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_doubly_nested_graph_state(checkpointer_name: str) -> None: + class State(TypedDict): + my_key: str + + class ChildState(TypedDict): + my_key: str + + class GrandChildState(TypedDict): + my_key: str + + def grandchild_1(state: ChildState): + return {"my_key": state["my_key"] + " here"} + + def grandchild_2(state: ChildState): + return { + "my_key": state["my_key"] + " and there", + } + + grandchild = StateGraph(GrandChildState) + grandchild.add_node("grandchild_1", grandchild_1) + grandchild.add_node("grandchild_2", grandchild_2) + grandchild.add_edge("grandchild_1", "grandchild_2") + grandchild.set_entry_point("grandchild_1") + grandchild.set_finish_point("grandchild_2") + + child = StateGraph(ChildState) + child.add_node( + "child_1", + grandchild.compile(interrupt_before=["grandchild_2"]), + ) + child.set_entry_point("child_1") + child.set_finish_point("child_1") + + def parent_1(state: State): + return {"my_key": "hi " + state["my_key"]} + + def parent_2(state: State): + return {"my_key": state["my_key"] + " and back again"} + + graph = 
StateGraph(State) + graph.add_node("parent_1", parent_1) + graph.add_node("child", child.compile()) + graph.add_node("parent_2", parent_2) + graph.set_entry_point("parent_1") + graph.add_edge("parent_1", "child") + graph.add_edge("child", "parent_2") + graph.set_finish_point("parent_2") + + async with awith_checkpointer(checkpointer_name) as checkpointer: + app = graph.compile(checkpointer=checkpointer) + + # test invoke w/ nested interrupt + config = {"configurable": {"thread_id": "1"}} + assert [ + c async for c in app.astream({"my_key": "my value"}, config, subgraphs=True) + ] == [ + ((), {"parent_1": {"my_key": "hi my value"}}), + ( + (AnyStr("child:"), AnyStr("child_1:")), + {"grandchild_1": {"my_key": "hi my value here"}}, + ), + ((), {"__interrupt__": ()}), + ] + # get state without subgraphs + outer_state = await app.aget_state(config) + assert outer_state == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child", + (PULL, "child"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child"), + } + }, + ), + ), + next=("child",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"parent_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + child_state = await app.aget_state(outer_state.tasks[0].state) + assert ( + child_state.tasks[0] + == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child_1", + (PULL, "child_1"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + } + }, + ), + ), + next=("child_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + } + }, + 
metadata={ + "parents": {"": AnyStr()}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + } + }, + ).tasks[0] + ) + grandchild_state = await app.aget_state(child_state.tasks[0].state) + assert grandchild_state == StateSnapshot( + values={"my_key": "hi my value here"}, + tasks=( + PregelTask( + AnyStr(), + "grandchild_2", + (PULL, "grandchild_2"), + ), + ), + next=("grandchild_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "source": "loop", + "writes": {"grandchild_1": {"my_key": "hi my value here"}}, + "step": 1, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [PULL, AnyStr("child_1")], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + ) + # get state with subgraphs + assert await app.aget_state(config, subgraphs=True) == StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child", + (PULL, "child"), + state=StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child_1", + (PULL, "child_1"), + state=StateSnapshot( + values={"my_key": "hi my value here"}, + tasks=( + PregelTask( + AnyStr(), + 
"grandchild_2", + (PULL, "grandchild_2"), + ), + ), + next=("grandchild_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr( + re.compile(r"child:.+|child1:") + ): AnyStr(), + } + ), + } + }, + metadata={ + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "source": "loop", + "writes": { + "grandchild_1": { + "my_key": "hi my value here" + } + }, + "step": 1, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr( + re.compile(r"child:.+|child1:") + ): AnyStr(), + } + ), + } + }, + ), + ), + ), + next=("child_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "parents": {"": AnyStr()}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + "langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + ), + ), + ), + next=("child",), + config={ + "configurable": { + 
"thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"parent_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + # resume + assert [c async for c in app.astream(None, config, subgraphs=True)] == [ + ( + (AnyStr("child:"), AnyStr("child_1:")), + {"grandchild_2": {"my_key": "hi my value here and there"}}, + ), + ( + (AnyStr("child:"),), + {"child_1": {"my_key": "hi my value here and there"}}, + ), + ((), {"child": {"my_key": "hi my value here and there"}}), + ((), {"parent_2": {"my_key": "hi my value here and there and back again"}}), + ] + # get state with and without subgraphs + assert ( + await app.aget_state(config) + == await app.aget_state(config, subgraphs=True) + == StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "parent_2": { + "my_key": "hi my value here and there and back again" + } + }, + "step": 3, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + ) + # get outer graph history + outer_history = [c async for c in app.aget_state_history(config)] + assert ( + outer_history[0] + == [ + StateSnapshot( + values={"my_key": "hi my value here and there and back again"}, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "parent_2": { + "my_key": "hi my value here and there and back again" + } + }, + "step": 3, + 
"thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "hi my value here and there"}, + next=("parent_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"child": {"my_key": "hi my value here and there"}}, + "step": 2, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), name="parent_2", path=(PULL, "parent_2") + ), + ), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + tasks=( + PregelTask( + AnyStr(), + "child", + (PULL, "child"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child"), + } + }, + ), + ), + next=("child",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": {"parent_1": {"my_key": "hi my value"}}, + "step": 1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"my_key": "my value"}, + next=("parent_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": None, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), name="parent_1", path=(PULL, "parent_1") + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + 
"configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"my_key": "my value"}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), name="__start__", path=(PULL, "__start__") + ), + ), + ), + ][0] + ) + # get child graph history + child_history = [ + c async for c in app.aget_state_history(outer_history[2].tasks[0].state) + ] + assert child_history == [ + StateSnapshot( + values={"my_key": "hi my value here and there"}, + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": {"child_1": {"my_key": "hi my value here and there"}}, + "step": 1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + "langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=(), + ), + StateSnapshot( + values={"my_key": "hi my value"}, + next=("child_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + 
"langgraph_triggers": [AnyStr("parent_1")], + "langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="child_1", + path=(PULL, "child_1"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + } + }, + result={"my_key": "hi my value here and there"}, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + {"": AnyStr(), AnyStr("child:"): AnyStr()} + ), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "hi my value"}}, + "step": -1, + "parents": {"": AnyStr()}, + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child", + "langgraph_path": [PULL, AnyStr("child")], + "langgraph_step": 2, + "langgraph_triggers": [AnyStr("parent_1")], + "langgraph_checkpoint_ns": AnyStr("child:"), + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=(PULL, "__start__"), + result={"my_key": "hi my value"}, + ), + ), + ), + ] + # get grandchild graph history + grandchild_history = [ + c async for c in app.aget_state_history(child_history[1].tasks[0].state) + ] + assert grandchild_history == [ + StateSnapshot( + values={"my_key": "hi my value here and there"}, + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": { + "grandchild_2": {"my_key": "hi my 
value here and there"} + }, + "step": 2, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + tasks=(), + ), + StateSnapshot( + values={"my_key": "hi my value here"}, + next=("grandchild_2",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": {"grandchild_1": {"my_key": "hi my value here"}}, + "step": 1, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="grandchild_2", + path=(PULL, "grandchild_2"), + result={"my_key": "hi my value here and there"}, + ), + ), + ), + StateSnapshot( + values={"my_key": "hi my 
value"}, + next=("grandchild_1",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": None, + "step": 0, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="grandchild_1", + path=(PULL, "grandchild_1"), + result={"my_key": "hi my value here"}, + ), + ), + ), + StateSnapshot( + values={}, + next=("__start__",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr(), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + AnyStr(re.compile(r"child:.+|child1:")): AnyStr(), + } + ), + } + }, + metadata={ + "source": "input", + "writes": {"__start__": {"my_key": "hi my value"}}, + "step": -1, + "parents": AnyDict( + { + "": AnyStr(), + AnyStr("child:"): AnyStr(), + } + ), + "thread_id": "1", + "checkpoint_ns": AnyStr("child:"), + "langgraph_checkpoint_ns": AnyStr("child:"), + "langgraph_node": "child_1", + "langgraph_path": [ + PULL, + AnyStr("child_1"), + ], + "langgraph_step": 1, + "langgraph_triggers": [AnyStr("start:child_1")], + }, + created_at=AnyStr(), + parent_config=None, + tasks=( + PregelTask( + id=AnyStr(), + 
name="__start__", + path=(PULL, "__start__"), + result={"my_key": "hi my value"}, + ), + ), + ), + ] + + # replay grandchild checkpoint + assert [ + c + async for c in app.astream( + None, grandchild_history[2].config, subgraphs=True + ) + ] == [ + ( + (AnyStr("child:"), AnyStr("child_1:")), + {"grandchild_1": {"my_key": "hi my value here"}}, + ), + ((), {"__interrupt__": ()}), + ] + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_send_to_nested_graphs(checkpointer_name: str) -> None: + class OverallState(TypedDict): + subjects: list[str] + jokes: Annotated[list[str], operator.add] + + async def continue_to_jokes(state: OverallState): + return [Send("generate_joke", {"subject": s}) for s in state["subjects"]] + + class JokeState(TypedDict): + subject: str + + async def edit(state: JokeState): + subject = state["subject"] + return {"subject": f"{subject} - hohoho"} + + # subgraph + subgraph = StateGraph(JokeState, output=OverallState) + subgraph.add_node("edit", edit) + subgraph.add_node( + "generate", lambda state: {"jokes": [f"Joke about {state['subject']}"]} + ) + subgraph.set_entry_point("edit") + subgraph.add_edge("edit", "generate") + subgraph.set_finish_point("generate") + + # parent graph + builder = StateGraph(OverallState) + builder.add_node( + "generate_joke", + subgraph.compile(interrupt_before=["generate"]), + ) + builder.add_conditional_edges(START, continue_to_jokes) + builder.add_edge("generate_joke", END) + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = builder.compile(checkpointer=checkpointer) + config = {"configurable": {"thread_id": "1"}} + tracer = FakeTracer() + + # invoke and pause at nested interrupt + assert await graph.ainvoke( + {"subjects": ["cats", "dogs"]}, + config={**config, "callbacks": [tracer]}, + ) == { + "subjects": ["cats", "dogs"], + "jokes": [], + } + assert len(tracer.runs) == 1, "Should produce exactly 1 root run" + + # check state + outer_state = 
await graph.aget_state(config) + + if not FF_SEND_V2: + # update state of dogs joke graph + await graph.aupdate_state( + outer_state.tasks[1].state, {"subject": "turtles - hohoho"} + ) + + # continue past interrupt + assert await graph.ainvoke(None, config=config) == { + "subjects": ["cats", "dogs"], + "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"], + } + return + + assert outer_state == StateSnapshot( + values={"subjects": ["cats", "dogs"], "jokes": []}, + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=("__pregel_pull", "__start__"), + error=None, + interrupts=(), + state=None, + result={"subjects": ["cats", "dogs"]}, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + ), + ), + next=("generate_joke", "generate_joke"), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": { + "__start__": { + "subjects": [ + "cats", + "dogs", + ], + } + }, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ) + + # update state of dogs joke graph + await graph.aupdate_state( + outer_state.tasks[2].state, {"subject": "turtles - hohoho"} + ) + + # continue past interrupt + assert await graph.ainvoke(None, config=config) == { + "subjects": ["cats", "dogs"], + "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"], + } + + actual_snapshot = await graph.aget_state(config) + expected_snapshot = StateSnapshot( + values={ + "subjects": ["cats", "dogs"], + "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"], 
+ }, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "generate_joke": [ + {"jokes": ["Joke about cats - hohoho"]}, + {"jokes": ["Joke about turtles - hohoho"]}, + ] + }, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ) + assert actual_snapshot == expected_snapshot + + # test full history + actual_history = [c async for c in graph.aget_state_history(config)] + expected_history = [ + StateSnapshot( + values={ + "subjects": ["cats", "dogs"], + "jokes": [ + "Joke about cats - hohoho", + "Joke about turtles - hohoho", + ], + }, + tasks=(), + next=(), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "loop", + "writes": { + "generate_joke": [ + {"jokes": ["Joke about cats - hohoho"]}, + {"jokes": ["Joke about turtles - hohoho"]}, + ] + }, + "step": 0, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + ), + StateSnapshot( + values={"jokes": []}, + next=("__start__", "generate_joke", "generate_joke"), + tasks=( + PregelTask( + id=AnyStr(), + name="__start__", + path=("__pregel_pull", "__start__"), + error=None, + interrupts=(), + state=None, + result={"subjects": ["cats", "dogs"]}, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + result={"jokes": ["Joke about cats - hohoho"]}, + ), + PregelTask( + AnyStr(), + "generate_joke", + (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()), + state={ + "configurable": { + 
"thread_id": "1", + "checkpoint_ns": AnyStr("generate_joke:"), + } + }, + result={"jokes": ["Joke about turtles - hohoho"]}, + ), + ), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "parents": {}, + "source": "input", + "writes": {"__start__": {"subjects": ["cats", "dogs"]}}, + "step": -1, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config=None, + ), + ] + assert actual_history == expected_history + + +@pytest.mark.skipif( + sys.version_info < (3, 11), + reason="Python 3.11+ is required for async contextvars support", +) +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +async def test_weather_subgraph( + checkpointer_name: str, snapshot: SnapshotAssertion +) -> None: + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import AIMessage, ToolCall + from langchain_core.tools import tool + + from langgraph.graph import MessagesState + + # setup subgraph + + @tool + def get_weather(city: str): + """Get the weather for a specific city""" + return f"I'ts sunny in {city}!" 
+ + weather_model = FakeMessagesListChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + ToolCall( + id="tool_call123", + name="get_weather", + args={"city": "San Francisco"}, + ) + ], + ) + ] + ) + + class SubGraphState(MessagesState): + city: str + + def model_node(state: SubGraphState, writer: StreamWriter): + writer(" very") + result = weather_model.invoke(state["messages"]) + return {"city": cast(AIMessage, result).tool_calls[0]["args"]["city"]} + + def weather_node(state: SubGraphState, writer: StreamWriter): + writer(" good") + result = get_weather.invoke({"city": state["city"]}) + return {"messages": [{"role": "assistant", "content": result}]} + + subgraph = StateGraph(SubGraphState) + subgraph.add_node(model_node) + subgraph.add_node(weather_node) + subgraph.add_edge(START, "model_node") + subgraph.add_edge("model_node", "weather_node") + subgraph.add_edge("weather_node", END) + subgraph = subgraph.compile(interrupt_before=["weather_node"]) + + # setup main graph + + class RouterState(MessagesState): + route: Literal["weather", "other"] + + class Router(TypedDict): + route: Literal["weather", "other"] + + router_model = FakeMessagesListChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + ToolCall( + id="tool_call123", + name="router", + args={"dest": "weather"}, + ) + ], + ) + ] + ) + + def router_node(state: RouterState, writer: StreamWriter): + writer("I'm") + system_message = "Classify the incoming query as either about weather or not." 
+ messages = [{"role": "system", "content": system_message}] + state["messages"] + route = router_model.invoke(messages) + return {"route": cast(AIMessage, route).tool_calls[0]["args"]["dest"]} + + def normal_llm_node(state: RouterState): + return {"messages": [AIMessage("Hello!")]} + + def route_after_prediction(state: RouterState): + if state["route"] == "weather": + return "weather_graph" + else: + return "normal_llm_node" + + def weather_graph(state: RouterState): + # this tests that all async checkpointers tested also implement sync methods + # as the subgraph called with sync invoke will use sync checkpointer methods + return subgraph.invoke(state) + + graph = StateGraph(RouterState) + graph.add_node(router_node) + graph.add_node(normal_llm_node) + graph.add_node("weather_graph", weather_graph) + graph.add_edge(START, "router_node") + graph.add_conditional_edges("router_node", route_after_prediction) + graph.add_edge("normal_llm_node", END) + graph.add_edge("weather_graph", END) + + def get_first_in_list(): + return [*graph.get_state_history(config, limit=1)][0] + + async with awith_checkpointer(checkpointer_name) as checkpointer: + graph = graph.compile(checkpointer=checkpointer) + + assert graph.get_graph(xray=1).draw_mermaid() == snapshot + + config = {"configurable": {"thread_id": "1"}} + thread2 = {"configurable": {"thread_id": "2"}} + inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]} + + # run with custom output + assert [ + c async for c in graph.astream(inputs, thread2, stream_mode="custom") + ] == [ + "I'm", + " very", + ] + assert [ + c async for c in graph.astream(None, thread2, stream_mode="custom") + ] == [ + " good", + ] + + # run until interrupt + assert [ + c + async for c in graph.astream( + inputs, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ((), {"router_node": {"route": "weather"}}), + ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}), + ((), {"__interrupt__": 
()}), + ] + + # check current state + state = await graph.aget_state(config) + assert state == StateSnapshot( + values={ + "messages": [_AnyIdHumanMessage(content="what's the weather in sf")], + "route": "weather", + }, + next=("weather_graph",), + config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"router_node": {"route": "weather"}}, + "step": 1, + "parents": {}, + "thread_id": "1", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_graph", + path=(PULL, "weather_graph"), + state={ + "configurable": { + "thread_id": "1", + "checkpoint_ns": AnyStr("weather_graph:"), + } + }, + ), + ), + ) + # confirm that list() delegates to alist() correctly + assert await asyncio.to_thread(get_first_in_list) == state + + # update + await graph.aupdate_state(state.tasks[0].state, {"city": "la"}) + + # run after update + assert [ + c + async for c in graph.astream( + None, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ( + (AnyStr("weather_graph:"),), + { + "weather_node": { + "messages": [ + {"role": "assistant", "content": "I'ts sunny in la!"} + ] + } + }, + ), + ( + (), + { + "weather_graph": { + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf"), + _AnyIdAIMessage(content="I'ts sunny in la!"), + ] + } + }, + ), + ] + + # try updating acting as weather node + config = {"configurable": {"thread_id": "14"}} + inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]} + assert [ + c + async for c in graph.astream( + inputs, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ((), {"router_node": {"route": "weather"}}), + ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}), + ((), {"__interrupt__": ()}), + ] + state = await 
graph.aget_state(config, subgraphs=True) + assert state == StateSnapshot( + values={ + "messages": [_AnyIdHumanMessage(content="what's the weather in sf")], + "route": "weather", + }, + next=("weather_graph",), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"router_node": {"route": "weather"}}, + "step": 1, + "parents": {}, + "thread_id": "14", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_graph", + path=(PULL, "weather_graph"), + state=StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf") + ], + "city": "San Francisco", + }, + next=("weather_node",), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + metadata={ + "source": "loop", + "writes": {"model_node": {"city": "San Francisco"}}, + "step": 1, + "parents": {"": AnyStr()}, + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "langgraph_node": "weather_graph", + "langgraph_path": [PULL, "weather_graph"], + "langgraph_step": 2, + "langgraph_triggers": [ + "branch:router_node:route_after_prediction:weather_graph" + ], + "langgraph_checkpoint_ns": AnyStr("weather_graph:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_node", + path=(PULL, "weather_node"), + ), + ), + ), + ), + ), + ) + await graph.aupdate_state( + state.tasks[0].state.config, + 
{"messages": [{"role": "assistant", "content": "rainy"}]}, + as_node="weather_node", + ) + state = await graph.aget_state(config, subgraphs=True) + assert state == StateSnapshot( + values={ + "messages": [_AnyIdHumanMessage(content="what's the weather in sf")], + "route": "weather", + }, + next=("weather_graph",), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + metadata={ + "source": "loop", + "writes": {"router_node": {"route": "weather"}}, + "step": 1, + "parents": {}, + "thread_id": "14", + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": "", + "checkpoint_id": AnyStr(), + } + }, + tasks=( + PregelTask( + id=AnyStr(), + name="weather_graph", + path=(PULL, "weather_graph"), + state=StateSnapshot( + values={ + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf"), + _AnyIdAIMessage(content="rainy"), + ], + "city": "San Francisco", + }, + next=(), + config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): AnyStr(), + } + ), + } + }, + metadata={ + "step": 2, + "source": "update", + "writes": { + "weather_node": { + "messages": [ + {"role": "assistant", "content": "rainy"} + ] + } + }, + "parents": {"": AnyStr()}, + "thread_id": "14", + "checkpoint_id": AnyStr(), + "checkpoint_ns": AnyStr("weather_graph:"), + "langgraph_node": "weather_graph", + "langgraph_path": [PULL, "weather_graph"], + "langgraph_step": 2, + "langgraph_triggers": [ + "branch:router_node:route_after_prediction:weather_graph" + ], + "langgraph_checkpoint_ns": AnyStr("weather_graph:"), + }, + created_at=AnyStr(), + parent_config={ + "configurable": { + "thread_id": "14", + "checkpoint_ns": AnyStr("weather_graph:"), + "checkpoint_id": AnyStr(), + "checkpoint_map": AnyDict( + { + "": AnyStr(), + AnyStr("weather_graph:"): 
AnyStr(), + } + ), + } + }, + tasks=(), + ), + ), + ), + ) + assert [ + c + async for c in graph.astream( + None, config=config, stream_mode="updates", subgraphs=True + ) + ] == [ + ( + (), + { + "weather_graph": { + "messages": [ + _AnyIdHumanMessage(content="what's the weather in sf"), + _AnyIdAIMessage(content="rainy"), + ] + } + }, + ), + ] + + +async def test_checkpoint_metadata() -> None: + """This test verifies that a run's configurable fields are merged with the + previous checkpoint config for each step in the run. + """ + # set up test + from langchain_core.language_models.fake_chat_models import ( + FakeMessagesListChatModel, + ) + from langchain_core.messages import AIMessage, AnyMessage + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.tools import tool + + # graph state + class BaseState(TypedDict): + messages: Annotated[list[AnyMessage], add_messages] + + # initialize graph nodes + @tool() + def search_api(query: str) -> str: + """Searches the API for the query.""" + return f"result for {query}" + + tools = [search_api] + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a nice assistant."), + ("placeholder", "{messages}"), + ] + ) + + model = FakeMessagesListChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[ + { + "id": "tool_call123", + "name": "search_api", + "args": {"query": "query"}, + }, + ], + ), + AIMessage(content="answer"), + ] + ) + + def agent(state: BaseState, config: RunnableConfig) -> BaseState: + formatted = prompt.invoke(state) + response = model.invoke(formatted) + return {"messages": response} + + def should_continue(data: BaseState) -> str: + # Logic to decide whether to continue in the loop or exit + if not data["messages"][-1].tool_calls: + return "exit" + else: + return "continue" + + # define graphs w/ and w/o interrupt + workflow = StateGraph(BaseState) + workflow.add_node("agent", agent) + workflow.add_node("tools", ToolNode(tools)) + 
workflow.set_entry_point("agent") + workflow.add_conditional_edges( + "agent", should_continue, {"continue": "tools", "exit": END} + ) + workflow.add_edge("tools", "agent") + + # graph w/o interrupt + checkpointer_1 = MemorySaverAssertCheckpointMetadata() + app = workflow.compile(checkpointer=checkpointer_1) + + # graph w/ interrupt + checkpointer_2 = MemorySaverAssertCheckpointMetadata() + app_w_interrupt = workflow.compile( + checkpointer=checkpointer_2, interrupt_before=["tools"] + ) + + # assertions + + # invoke graph w/o interrupt + await app.ainvoke( + {"messages": ["what is weather in sf"]}, + { + "configurable": { + "thread_id": "1", + "test_config_1": "foo", + "test_config_2": "bar", + }, + }, + ) + + config = {"configurable": {"thread_id": "1"}} + + # assert that checkpoint metadata contains the run's configurable fields + chkpnt_metadata_1 = (await checkpointer_1.aget_tuple(config)).metadata + assert chkpnt_metadata_1["thread_id"] == "1" + assert chkpnt_metadata_1["test_config_1"] == "foo" + assert chkpnt_metadata_1["test_config_2"] == "bar" + + # Verify that all checkpoint metadata have the expected keys. This check + # is needed because a run may have an arbitrary number of steps depending + # on how the graph is constructed. 
+ chkpnt_tuples_1 = checkpointer_1.alist(config) + async for chkpnt_tuple in chkpnt_tuples_1: + assert chkpnt_tuple.metadata["thread_id"] == "1" + assert chkpnt_tuple.metadata["test_config_1"] == "foo" + assert chkpnt_tuple.metadata["test_config_2"] == "bar" + + # invoke graph, but interrupt before tool call + await app_w_interrupt.ainvoke( + {"messages": ["what is weather in sf"]}, + { + "configurable": { + "thread_id": "2", + "test_config_3": "foo", + "test_config_4": "bar", + }, + }, + ) + + config = {"configurable": {"thread_id": "2"}} + + # assert that checkpoint metadata contains the run's configurable fields + chkpnt_metadata_2 = (await checkpointer_2.aget_tuple(config)).metadata + assert chkpnt_metadata_2["thread_id"] == "2" + assert chkpnt_metadata_2["test_config_3"] == "foo" + assert chkpnt_metadata_2["test_config_4"] == "bar" + + # resume graph execution + await app_w_interrupt.ainvoke( + input=None, + config={ + "configurable": { + "thread_id": "2", + "test_config_3": "foo", + "test_config_4": "bar", + } + }, + ) + + # assert that checkpoint metadata contains the run's configurable fields + chkpnt_metadata_3 = (await checkpointer_2.aget_tuple(config)).metadata + assert chkpnt_metadata_3["thread_id"] == "2" + assert chkpnt_metadata_3["test_config_3"] == "foo" + assert chkpnt_metadata_3["test_config_4"] == "bar" + + # Verify that all checkpoint metadata have the expected keys. This check + # is needed because a run may have an arbitrary number of steps depending + # on how the graph is constructed. 
+ chkpnt_tuples_2 = checkpointer_2.alist(config) + async for chkpnt_tuple in chkpnt_tuples_2: + assert chkpnt_tuple.metadata["thread_id"] == "2" + assert chkpnt_tuple.metadata["test_config_3"] == "foo" + assert chkpnt_tuple.metadata["test_config_4"] == "bar" + + +async def test_checkpointer_null_pending_writes() -> None: + class Node: + def __init__(self, name: str): + self.name = name + self.__name__ = name + + def __call__(self, state): + return [self.name] + + builder = StateGraph(Annotated[list, operator.add]) + builder.add_node(Node("1")) + builder.add_edge(START, "1") + graph = builder.compile(checkpointer=MemorySaverNoPending()) + assert graph.invoke([], {"configurable": {"thread_id": "foo"}}) == ["1"] + assert graph.invoke([], {"configurable": {"thread_id": "foo"}}) == ["1"] * 2 + assert (await graph.ainvoke([], {"configurable": {"thread_id": "foo"}})) == [ + "1" + ] * 3 + assert (await graph.ainvoke([], {"configurable": {"thread_id": "foo"}})) == [ + "1" + ] * 4 + + +@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC) +@pytest.mark.parametrize("store_name", ALL_STORES_ASYNC) +async def test_store_injected_async(checkpointer_name: str, store_name: str) -> None: + class State(TypedDict): + count: Annotated[int, operator.add] + + doc_id = str(uuid.uuid4()) + doc = {"some-key": "this-is-a-val"} + + async def node(input: State, config: RunnableConfig, store: BaseStore): + assert isinstance(store, BaseStore) + await store.aput( + ("foo", "bar"), + doc_id, + { + **doc, + "from_thread": config["configurable"]["thread_id"], + "some_val": input["count"], + }, + ) + return {"count": 1} + + builder = StateGraph(State) + builder.add_node("node", node) + builder.add_edge("__start__", "node") + async with awith_checkpointer(checkpointer_name) as checkpointer, awith_store( + store_name + ) as the_store: + graph = builder.compile(store=the_store, checkpointer=checkpointer) + + thread_1 = str(uuid.uuid4()) + result = await graph.ainvoke( + {"count": 0}, 
{"configurable": {"thread_id": thread_1}} + ) + assert result == {"count": 1} + returned_doc = (await the_store.aget(("foo", "bar"), doc_id)).value + assert returned_doc == {**doc, "from_thread": thread_1, "some_val": 0} + assert len(await the_store.asearch(("foo", "bar"))) == 1 + + # Check update on existing thread + result = await graph.ainvoke( + {"count": 0}, {"configurable": {"thread_id": thread_1}} + ) + assert result == {"count": 2} + returned_doc = (await the_store.aget(("foo", "bar"), doc_id)).value + assert returned_doc == {**doc, "from_thread": thread_1, "some_val": 1} + assert len(await the_store.asearch(("foo", "bar"))) == 1 + + thread_2 = str(uuid.uuid4()) + + result = await graph.ainvoke( + {"count": 0}, {"configurable": {"thread_id": thread_2}} + ) + assert result == {"count": 1} + returned_doc = (await the_store.aget(("foo", "bar"), doc_id)).value + assert returned_doc == { + **doc, + "from_thread": thread_2, + "some_val": 0, + } # Overwrites the whole doc + assert ( + len(await the_store.asearch(("foo", "bar"))) == 1 + ) # still overwriting the same one + + +async def test_debug_retry(): + class State(TypedDict): + messages: Annotated[list[str], operator.add] + + def node(name): + async def _node(state: State): + return {"messages": [f"entered {name} node"]} + + return _node + + builder = StateGraph(State) + builder.add_node("one", node("one")) + builder.add_node("two", node("two")) + builder.add_edge(START, "one") + builder.add_edge("one", "two") + builder.add_edge("two", END) + + saver = MemorySaver() + + graph = builder.compile(checkpointer=saver) + + config = {"configurable": {"thread_id": "1"}} + await graph.ainvoke({"messages": []}, config=config) + + # re-run step: 1 + async for c in saver.alist(config): + if c.metadata["step"] == 1: + target_config = c.parent_config + break + assert target_config is not None + + update_config = await graph.aupdate_state(target_config, values=None) + + events = [ + c async for c in graph.astream(None, 
config=update_config, stream_mode="debug") + ] + + checkpoint_events = list( + reversed([e["payload"] for e in events if e["type"] == "checkpoint"]) + ) + + checkpoint_history = { + c.config["configurable"]["checkpoint_id"]: c + async for c in graph.aget_state_history(config) + } + + def lax_normalize_config(config: Optional[dict]) -> Optional[dict]: + if config is None: + return None + return config["configurable"] + + for stream in checkpoint_events: + stream_conf = lax_normalize_config(stream["config"]) + stream_parent_conf = lax_normalize_config(stream["parent_config"]) + assert stream_conf != stream_parent_conf + + # ensure the streamed checkpoint == checkpoint from checkpointer.list() + history = checkpoint_history[stream["config"]["configurable"]["checkpoint_id"]] + history_conf = lax_normalize_config(history.config) + assert stream_conf == history_conf + + history_parent_conf = lax_normalize_config(history.parent_config) + assert stream_parent_conf == history_parent_conf + + +async def test_debug_subgraphs(): + class State(TypedDict): + messages: Annotated[list[str], operator.add] + + def node(name): + async def _node(state: State): + return {"messages": [f"entered {name} node"]} + + return _node + + parent = StateGraph(State) + child = StateGraph(State) + + child.add_node("c_one", node("c_one")) + child.add_node("c_two", node("c_two")) + child.add_edge(START, "c_one") + child.add_edge("c_one", "c_two") + child.add_edge("c_two", END) + + parent.add_node("p_one", node("p_one")) + parent.add_node("p_two", child.compile()) + parent.add_edge(START, "p_one") + parent.add_edge("p_one", "p_two") + parent.add_edge("p_two", END) + + graph = parent.compile(checkpointer=MemorySaver()) + + config = {"configurable": {"thread_id": "1"}} + events = [ + c + async for c in graph.astream( + {"messages": []}, + config=config, + stream_mode="debug", + ) + ] + + checkpoint_events = list( + reversed([e["payload"] for e in events if e["type"] == "checkpoint"]) + ) + 
checkpoint_history = [c async for c in graph.aget_state_history(config)] + + assert len(checkpoint_events) == len(checkpoint_history) + + def normalize_config(config: Optional[dict]) -> Optional[dict]: + if config is None: + return None + return config["configurable"] + + for stream, history in zip(checkpoint_events, checkpoint_history): + assert stream["values"] == history.values + assert stream["next"] == list(history.next) + assert normalize_config(stream["config"]) == normalize_config(history.config) + assert normalize_config(stream["parent_config"]) == normalize_config( + history.parent_config + ) + + assert len(stream["tasks"]) == len(history.tasks) + for stream_task, history_task in zip(stream["tasks"], history.tasks): + assert stream_task["id"] == history_task.id + assert stream_task["name"] == history_task.name + assert stream_task["interrupts"] == history_task.interrupts + assert stream_task.get("error") == history_task.error + assert stream_task.get("state") == history_task.state + + +async def test_debug_nested_subgraphs(): + from collections import defaultdict + + class State(TypedDict): + messages: Annotated[list[str], operator.add] + + def node(name): + async def _node(state: State): + return {"messages": [f"entered {name} node"]} + + return _node + + grand_parent = StateGraph(State) + parent = StateGraph(State) + child = StateGraph(State) + + child.add_node("c_one", node("c_one")) + child.add_node("c_two", node("c_two")) + child.add_edge(START, "c_one") + child.add_edge("c_one", "c_two") + child.add_edge("c_two", END) + + parent.add_node("p_one", node("p_one")) + parent.add_node("p_two", child.compile()) + parent.add_edge(START, "p_one") + parent.add_edge("p_one", "p_two") + parent.add_edge("p_two", END) + + grand_parent.add_node("gp_one", node("gp_one")) + grand_parent.add_node("gp_two", parent.compile()) + grand_parent.add_edge(START, "gp_one") + grand_parent.add_edge("gp_one", "gp_two") + grand_parent.add_edge("gp_two", END) + + graph = 
grand_parent.compile(checkpointer=MemorySaver()) + + config = {"configurable": {"thread_id": "1"}} + events = [ + c + async for c in graph.astream( + {"messages": []}, + config=config, + stream_mode="debug", + subgraphs=True, + ) + ] + + stream_ns: dict[tuple, dict] = defaultdict(list) + for ns, e in events: + if e["type"] == "checkpoint": + stream_ns[ns].append(e["payload"]) + + assert list(stream_ns.keys()) == [ + (), + (AnyStr("gp_two:"),), + (AnyStr("gp_two:"), AnyStr("p_two:")), + ] + + history_ns = {} + for ns in stream_ns.keys(): + + async def get_history(namespace): + history = [ + c + async for c in graph.aget_state_history( + { + "configurable": { + "thread_id": "1", + "checkpoint_ns": "|".join(namespace), + } + } + ) + ] + return history[::-1] + + history_ns[ns] = await get_history(ns) + + def normalize_config(config: Optional[dict]) -> Optional[dict]: + if config is None: + return None + + clean_config = {} + clean_config["thread_id"] = config["configurable"]["thread_id"] + clean_config["checkpoint_id"] = config["configurable"]["checkpoint_id"] + clean_config["checkpoint_ns"] = config["configurable"]["checkpoint_ns"] + if "checkpoint_map" in config["configurable"]: + clean_config["checkpoint_map"] = config["configurable"]["checkpoint_map"] + + return clean_config + + for checkpoint_events, checkpoint_history in zip( + stream_ns.values(), history_ns.values() + ): + for stream, history in zip(checkpoint_events, checkpoint_history): + assert stream["values"] == history.values + assert stream["next"] == list(history.next) + assert normalize_config(stream["config"]) == normalize_config( + history.config + ) + assert normalize_config(stream["parent_config"]) == normalize_config( + history.parent_config + ) + + assert len(stream["tasks"]) == len(history.tasks) + for stream_task, history_task in zip(stream["tasks"], history.tasks): + assert stream_task["id"] == history_task.id + assert stream_task["name"] == history_task.name + assert stream_task["interrupts"] 
== history_task.interrupts + assert stream_task.get("error") == history_task.error + assert stream_task.get("state") == history_task.state diff --git a/libs/langgraph-checkpoint-mongodb/tests/unit_tests/conftest.py b/libs/langgraph-checkpoint-mongodb/tests/unit_tests/conftest.py new file mode 100644 index 0000000..eeb0c8a --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/unit_tests/conftest.py @@ -0,0 +1,44 @@ +from typing import Any, Dict + +import pytest +from langchain_core.runnables import RunnableConfig + +from langgraph.checkpoint.base import ( + CheckpointMetadata, + create_checkpoint, + empty_checkpoint, +) + + +@pytest.fixture(scope="session") +def input_data() -> dict: + """Setup and store conveniently in a single dictionary.""" + inputs: Dict[str, Any] = {} + + inputs["config_1"] = RunnableConfig( + configurable=dict(thread_id="thread-1", thread_ts="1", checkpoint_ns="") + ) # config_1 tests deprecated thread_ts + + inputs["config_2"] = RunnableConfig( + configurable=dict(thread_id="thread-2", checkpoint_id="2", checkpoint_ns="") + ) + + inputs["config_3"] = RunnableConfig( + configurable=dict( + thread_id="thread-2", checkpoint_id="2-inner", checkpoint_ns="inner" + ) + ) + + inputs["chkpnt_1"] = empty_checkpoint() + inputs["chkpnt_2"] = create_checkpoint(inputs["chkpnt_1"], {}, 1) + inputs["chkpnt_3"] = empty_checkpoint() + + inputs["metadata_1"] = CheckpointMetadata( + source="input", step=2, writes={}, score=1 + ) + inputs["metadata_2"] = CheckpointMetadata( + source="loop", step=1, writes={"foo": "bar"}, score=None + ) + inputs["metadata_3"] = CheckpointMetadata() + + return inputs diff --git a/libs/langgraph-checkpoint-mongodb/tests/unit_tests/test_async.py b/libs/langgraph-checkpoint-mongodb/tests/unit_tests/test_async.py new file mode 100644 index 0000000..3478313 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/unit_tests/test_async.py @@ -0,0 +1,107 @@ +import os +from typing import Any, Dict + +import pytest +from 
bson.errors import InvalidDocument +from motor.motor_asyncio import AsyncIOMotorClient + +from langgraph.checkpoint.mongodb.aio import AsyncMongoDBSaver + +MONGODB_URI = os.environ.get("MONGODB_URI", "mongodb://localhost:27017") +DB_NAME = os.environ.get("DB_NAME", "langgraph-test") +COLLECTION_NAME = "sync_checkpoints_aio" + + +async def test_asearch(input_data: Dict[str, Any]) -> None: + # Clear collections if they exist + client: AsyncIOMotorClient = AsyncIOMotorClient(MONGODB_URI) + db = client[DB_NAME] + + for clxn in await db.list_collection_names(): + await db.drop_collection(clxn) + + async with AsyncMongoDBSaver.from_conn_string( + MONGODB_URI, DB_NAME, COLLECTION_NAME + ) as saver: + # save checkpoints + await saver.aput( + input_data["config_1"], + input_data["chkpnt_1"], + input_data["metadata_1"], + {}, + ) + await saver.aput( + input_data["config_2"], + input_data["chkpnt_2"], + input_data["metadata_2"], + {}, + ) + await saver.aput( + input_data["config_3"], + input_data["chkpnt_3"], + input_data["metadata_3"], + {}, + ) + + # call method / assertions + query_1 = {"source": "input"} # search by 1 key + query_2 = { + "step": 1, + "writes": {"foo": "bar"}, + } # search by multiple keys + query_3: dict[str, Any] = {} # search by no keys, return all checkpoints + query_4 = {"source": "update", "step": 1} # no match + + search_results_1 = [c async for c in saver.alist(None, filter=query_1)] + assert len(search_results_1) == 1 + assert search_results_1[0].metadata == input_data["metadata_1"] + + search_results_2 = [c async for c in saver.alist(None, filter=query_2)] + assert len(search_results_2) == 1 + assert search_results_2[0].metadata == input_data["metadata_2"] + + search_results_3 = [c async for c in saver.alist(None, filter=query_3)] + assert len(search_results_3) == 3 + + search_results_4 = [c async for c in saver.alist(None, filter=query_4)] + assert len(search_results_4) == 0 + + # search by config (defaults to checkpoints across all namespaces) 
+ search_results_5 = [ + c async for c in saver.alist({"configurable": {"thread_id": "thread-2"}}) + ] + assert len(search_results_5) == 2 + assert { + search_results_5[0].config["configurable"]["checkpoint_ns"], + search_results_5[1].config["configurable"]["checkpoint_ns"], + } == {"", "inner"} + + +async def test_null_chars(input_data: Dict[str, Any]) -> None: + """In MongoDB string *values* can be any valid UTF-8 including nulls. + *Field names*, however, cannot contain nulls characters.""" + async with AsyncMongoDBSaver.from_conn_string( + MONGODB_URI, DB_NAME, COLLECTION_NAME + ) as saver: + null_str = "\x00abc" # string containing null character + + # 1. null string in field *value* + null_value_cfg = await saver.aput( + input_data["config_1"], + input_data["chkpnt_1"], + {"my_key": null_str}, + {}, + ) + null_tuple = await saver.aget_tuple(null_value_cfg) + assert null_tuple.metadata["my_key"] == null_str # type: ignore + cps = [c async for c in saver.alist(None, filter={"my_key": null_str})] + assert cps[0].metadata["my_key"] == null_str + + # 2. 
null string in field *name* + with pytest.raises(InvalidDocument): + await saver.aput( + input_data["config_1"], + input_data["chkpnt_1"], + {null_str: "my_value"}, # type: ignore + {}, + ) diff --git a/libs/langgraph-checkpoint-mongodb/tests/unit_tests/test_sync.py b/libs/langgraph-checkpoint-mongodb/tests/unit_tests/test_sync.py new file mode 100644 index 0000000..4b38c70 --- /dev/null +++ b/libs/langgraph-checkpoint-mongodb/tests/unit_tests/test_sync.py @@ -0,0 +1,167 @@ +import os +from typing import Any, Dict + +import pytest +from bson.errors import InvalidDocument +from langchain_core.messages import HumanMessage +from langchain_core.runnables import RunnableConfig +from pymongo import MongoClient + +from langgraph.checkpoint.base import ( + CheckpointMetadata, + empty_checkpoint, +) +from langgraph.checkpoint.mongodb import MongoDBSaver + +# Setup: +# docker run --name mongodb -d -p 27017:27017 mongodb/mongodb-community-server +MONGODB_URI = os.environ.get("MONGODB_URI", "mongodb://localhost:27017") +DB_NAME = os.environ.get("DB_NAME", "langgraph-test") +COLLECTION_NAME = "sync_checkpoints" + + +def test_search(input_data: Dict[str, Any]) -> None: + # Clear collections if they exist + client: MongoClient = MongoClient(MONGODB_URI) + db = client[DB_NAME] + for clxn_name in db.list_collection_names(): + db.drop_collection(clxn_name) + + with MongoDBSaver.from_conn_string(MONGODB_URI, DB_NAME, COLLECTION_NAME) as saver: + # save checkpoints + saver.put( + input_data["config_1"], + input_data["chkpnt_1"], + input_data["metadata_1"], + {}, + ) + saver.put( + input_data["config_2"], + input_data["chkpnt_2"], + input_data["metadata_2"], + {}, + ) + saver.put( + input_data["config_3"], + input_data["chkpnt_3"], + input_data["metadata_3"], + {}, + ) + + # call method / assertions + query_1 = {"source": "input"} # search by 1 key + query_2 = { + "step": 1, + "writes": {"foo": "bar"}, + } # search by multiple keys + query_3: dict[str, Any] = {} # search by no keys, 
return all checkpoints + query_4 = {"source": "update", "step": 1} # no match + + search_results_1 = list(saver.list(None, filter=query_1)) + assert len(search_results_1) == 1 + assert search_results_1[0].metadata == input_data["metadata_1"] + + search_results_2 = list(saver.list(None, filter=query_2)) + assert len(search_results_2) == 1 + assert search_results_2[0].metadata == input_data["metadata_2"] + + search_results_3 = list(saver.list(None, filter=query_3)) + assert len(search_results_3) == 3 + + search_results_4 = list(saver.list(None, filter=query_4)) + assert len(search_results_4) == 0 + + # search by config (defaults to checkpoints across all namespaces) + search_results_5 = list(saver.list({"configurable": {"thread_id": "thread-2"}})) + assert len(search_results_5) == 2 + assert { + search_results_5[0].config["configurable"]["checkpoint_ns"], + search_results_5[1].config["configurable"]["checkpoint_ns"], + } == {"", "inner"} + + +def test_null_chars(input_data: Dict[str, Any]) -> None: + """In MongoDB string *values* can be any valid UTF-8 including nulls. + *Field names*, however, cannot contain nulls characters.""" + with MongoDBSaver.from_conn_string(MONGODB_URI, DB_NAME, COLLECTION_NAME) as saver: + null_str = "\x00abc" # string containing null character + + # 1. null string in field *value* + null_value_cfg = saver.put( + input_data["config_1"], + input_data["chkpnt_1"], + {"my_key": null_str}, + {}, + ) + assert saver.get_tuple(null_value_cfg).metadata["my_key"] == null_str # type: ignore + assert ( + list(saver.list(None, filter={"my_key": null_str}))[0].metadata["my_key"] # type: ignore + == null_str + ) + + # 2. null string in field *name* + with pytest.raises(InvalidDocument): + saver.put( + input_data["config_1"], + input_data["chkpnt_1"], + {null_str: "my_value"}, # type: ignore + {}, + ) + + +def test_nested_filter() -> None: + """Test one can filter on nested structure of non-trivial objects. 
+ + This test highlights MongoDBSaver's _loads/(_dumps)_metadata methods, + which enable MongoDB's ability to query nested documents, + with the caveat that all keys are strings. + + We use a HumanMessage instance as found in the examples. + The MQL query created is {metadata.writes.message: } + + We also use the same message to check values in the Checkpoint. + """ + + input_message = HumanMessage(content="MongoDB is awesome!") + clxn_name = "writes_message" + thread_id = "thread-3" + + config = RunnableConfig( + configurable=dict(thread_id=thread_id, checkpoint_id="1", checkpoint_ns="") + ) + chkpt = empty_checkpoint() + chkpt["channel_values"] = input_message + + metadata = CheckpointMetadata( + source="loop", step=1, writes={"message": input_message} + ) + + with MongoDBSaver.from_conn_string(MONGODB_URI, DB_NAME, clxn_name) as saver: + saver.put(config, chkpt, metadata, {}) + + results = list(saver.list(None, filter={"writes.message": input_message})) + for cptpl in results: + assert cptpl.metadata["writes"]["message"] == input_message + break + + # Confirm serialization structure of data in collection + doc: dict[str, Any] = saver.checkpoint_collection.find_one( + {"thread_id": thread_id} + ) # type: ignore + assert isinstance(doc["checkpoint"], bytes) + assert ( + isinstance(doc["metadata"], dict) + and isinstance(doc["metadata"]["writes"], dict) + and isinstance(doc["metadata"]["writes"]["message"], bytes) + ) + + # Test values of checkpoint + # From checkpointer + assert cptpl.checkpoint["channel_values"] == input_message + # In database + chkpt_db = saver.serde.loads_typed((doc["type"], doc["checkpoint"])) + assert chkpt_db["channel_values"] == input_message + + # Drop collections + saver.checkpoint_collection.drop() + saver.writes_collection.drop() diff --git a/libs/mongodb/Makefile b/libs/mongodb/Makefile index 63fc9d4..6dc246f 100644 --- a/libs/mongodb/Makefile +++ b/libs/mongodb/Makefile @@ -51,9 +51,9 @@ check_imports: $(shell find langchain_mongodb 
-name '*.py') help: @echo '----' - @echo 'check_imports - check imports' + @echo 'check_imports - check imports' @echo 'format - run code formatters' @echo 'lint - run linters' @echo 'test - run unit tests' - @echo 'tests - run unit tests' + @echo 'integration_test - run integration tests' @echo 'test TEST_FILE= - run all tests in file' diff --git a/libs/mongodb/poetry.lock b/libs/mongodb/poetry.lock index 6b81e21..f94f11d 100644 --- a/libs/mongodb/poetry.lock +++ b/libs/mongodb/poetry.lock @@ -13,112 +13,98 @@ files = [ [[package]] name = "aiohttp" -version = "3.10.10" +version = "3.11.7" description = "Async http client/server framework (asyncio)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be7443669ae9c016b71f402e43208e13ddf00912f47f623ee5994e12fc7d4b3f"}, - {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b06b7843929e41a94ea09eb1ce3927865387e3e23ebe108e0d0d09b08d25be9"}, - {file = "aiohttp-3.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:333cf6cf8e65f6a1e06e9eb3e643a0c515bb850d470902274239fea02033e9a8"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:274cfa632350225ce3fdeb318c23b4a10ec25c0e2c880eff951a3842cf358ac1"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9e5e4a85bdb56d224f412d9c98ae4cbd032cc4f3161818f692cd81766eee65a"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b606353da03edcc71130b52388d25f9a30a126e04caef1fd637e31683033abd"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab5a5a0c7a7991d90446a198689c0535be89bbd6b410a1f9a66688f0880ec026"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:578a4b875af3e0daaf1ac6fa983d93e0bbfec3ead753b6d6f33d467100cdc67b"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8105fd8a890df77b76dd3054cddf01a879fc13e8af576805d667e0fa0224c35d"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3bcd391d083f636c06a68715e69467963d1f9600f85ef556ea82e9ef25f043f7"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fbc6264158392bad9df19537e872d476f7c57adf718944cc1e4495cbabf38e2a"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e48d5021a84d341bcaf95c8460b152cfbad770d28e5fe14a768988c461b821bc"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2609e9ab08474702cc67b7702dbb8a80e392c54613ebe80db7e8dbdb79837c68"}, - {file = "aiohttp-3.10.10-cp310-cp310-win32.whl", hash = "sha256:84afcdea18eda514c25bc68b9af2a2b1adea7c08899175a51fe7c4fb6d551257"}, - {file = "aiohttp-3.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:9c72109213eb9d3874f7ac8c0c5fa90e072d678e117d9061c06e30c85b4cf0e6"}, - {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c30a0eafc89d28e7f959281b58198a9fa5e99405f716c0289b7892ca345fe45f"}, - {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:258c5dd01afc10015866114e210fb7365f0d02d9d059c3c3415382ab633fcbcb"}, - {file = "aiohttp-3.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:15ecd889a709b0080f02721255b3f80bb261c2293d3c748151274dfea93ac871"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3935f82f6f4a3820270842e90456ebad3af15810cf65932bd24da4463bc0a4c"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:413251f6fcf552a33c981c4709a6bba37b12710982fec8e558ae944bfb2abd38"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d1720b4f14c78a3089562b8875b53e36b51c97c51adc53325a69b79b4b48ebcb"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:679abe5d3858b33c2cf74faec299fda60ea9de62916e8b67e625d65bf069a3b7"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79019094f87c9fb44f8d769e41dbb664d6e8fcfd62f665ccce36762deaa0e911"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe2fb38c2ed905a2582948e2de560675e9dfbee94c6d5ccdb1301c6d0a5bf092"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a3f00003de6eba42d6e94fabb4125600d6e484846dbf90ea8e48a800430cc142"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1bbb122c557a16fafc10354b9d99ebf2f2808a660d78202f10ba9d50786384b9"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:30ca7c3b94708a9d7ae76ff281b2f47d8eaf2579cd05971b5dc681db8caac6e1"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:df9270660711670e68803107d55c2b5949c2e0f2e4896da176e1ecfc068b974a"}, - {file = "aiohttp-3.10.10-cp311-cp311-win32.whl", hash = "sha256:aafc8ee9b742ce75044ae9a4d3e60e3d918d15a4c2e08a6c3c3e38fa59b92d94"}, - {file = "aiohttp-3.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:362f641f9071e5f3ee6f8e7d37d5ed0d95aae656adf4ef578313ee585b585959"}, - {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9294bbb581f92770e6ed5c19559e1e99255e4ca604a22c5c6397b2f9dd3ee42c"}, - {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a8fa23fe62c436ccf23ff930149c047f060c7126eae3ccea005f0483f27b2e28"}, - {file = "aiohttp-3.10.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c6a5b8c7926ba5d8545c7dd22961a107526562da31a7a32fa2456baf040939f"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:007ec22fbc573e5eb2fb7dec4198ef8f6bf2fe4ce20020798b2eb5d0abda6138"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9627cc1a10c8c409b5822a92d57a77f383b554463d1884008e051c32ab1b3742"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50edbcad60d8f0e3eccc68da67f37268b5144ecc34d59f27a02f9611c1d4eec7"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a45d85cf20b5e0d0aa5a8dca27cce8eddef3292bc29d72dcad1641f4ed50aa16"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b00807e2605f16e1e198f33a53ce3c4523114059b0c09c337209ae55e3823a8"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f2d4324a98062be0525d16f768a03e0bbb3b9fe301ceee99611dc9a7953124e6"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:438cd072f75bb6612f2aca29f8bd7cdf6e35e8f160bc312e49fbecab77c99e3a"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:baa42524a82f75303f714108fea528ccacf0386af429b69fff141ffef1c534f9"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a7d8d14fe962153fc681f6366bdec33d4356f98a3e3567782aac1b6e0e40109a"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1277cd707c465cd09572a774559a3cc7c7a28802eb3a2a9472588f062097205"}, - {file = "aiohttp-3.10.10-cp312-cp312-win32.whl", hash = "sha256:59bb3c54aa420521dc4ce3cc2c3fe2ad82adf7b09403fa1f48ae45c0cbde6628"}, - {file = "aiohttp-3.10.10-cp312-cp312-win_amd64.whl", hash = "sha256:0e1b370d8007c4ae31ee6db7f9a2fe801a42b146cec80a86766e7ad5c4a259cf"}, - {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ad7593bb24b2ab09e65e8a1d385606f0f47c65b5a2ae6c551db67d6653e78c28"}, - {file = 
"aiohttp-3.10.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1eb89d3d29adaf533588f209768a9c02e44e4baf832b08118749c5fad191781d"}, - {file = "aiohttp-3.10.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3fe407bf93533a6fa82dece0e74dbcaaf5d684e5a51862887f9eaebe6372cd79"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aed5155f819873d23520919e16703fc8925e509abbb1a1491b0087d1cd969e"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f05e9727ce409358baa615dbeb9b969db94324a79b5a5cea45d39bdb01d82e6"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dffb610a30d643983aeb185ce134f97f290f8935f0abccdd32c77bed9388b42"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6658732517ddabe22c9036479eabce6036655ba87a0224c612e1ae6af2087e"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:741a46d58677d8c733175d7e5aa618d277cd9d880301a380fd296975a9cdd7bc"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e00e3505cd80440f6c98c6d69269dcc2a119f86ad0a9fd70bccc59504bebd68a"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ffe595f10566f8276b76dc3a11ae4bb7eba1aac8ddd75811736a15b0d5311414"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdfcf6443637c148c4e1a20c48c566aa694fa5e288d34b20fcdc58507882fed3"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d183cf9c797a5291e8301790ed6d053480ed94070637bfaad914dd38b0981f67"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77abf6665ae54000b98b3c742bc6ea1d1fb31c394bcabf8b5d2c1ac3ebfe7f3b"}, - {file = "aiohttp-3.10.10-cp313-cp313-win32.whl", hash = 
"sha256:4470c73c12cd9109db8277287d11f9dd98f77fc54155fc71a7738a83ffcc8ea8"}, - {file = "aiohttp-3.10.10-cp313-cp313-win_amd64.whl", hash = "sha256:486f7aabfa292719a2753c016cc3a8f8172965cabb3ea2e7f7436c7f5a22a151"}, - {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1b66ccafef7336a1e1f0e389901f60c1d920102315a56df85e49552308fc0486"}, - {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:acd48d5b80ee80f9432a165c0ac8cbf9253eaddb6113269a5e18699b33958dbb"}, - {file = "aiohttp-3.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3455522392fb15ff549d92fbf4b73b559d5e43dc522588f7eb3e54c3f38beee7"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c3b868724137f713a38376fef8120c166d1eadd50da1855c112fe97954aed8"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:da1dee8948d2137bb51fbb8a53cce6b1bcc86003c6b42565f008438b806cccd8"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5ce2ce7c997e1971b7184ee37deb6ea9922ef5163c6ee5aa3c274b05f9e12fa"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28529e08fde6f12eba8677f5a8608500ed33c086f974de68cc65ab218713a59d"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7db54c7914cc99d901d93a34704833568d86c20925b2762f9fa779f9cd2e70f"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03a42ac7895406220124c88911ebee31ba8b2d24c98507f4a8bf826b2937c7f2"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7e338c0523d024fad378b376a79faff37fafb3c001872a618cde1d322400a572"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:038f514fe39e235e9fef6717fbf944057bfa24f9b3db9ee551a7ecf584b5b480"}, - {file = 
"aiohttp-3.10.10-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:64f6c17757251e2b8d885d728b6433d9d970573586a78b78ba8929b0f41d045a"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:93429602396f3383a797a2a70e5f1de5df8e35535d7806c9f91df06f297e109b"}, - {file = "aiohttp-3.10.10-cp38-cp38-win32.whl", hash = "sha256:c823bc3971c44ab93e611ab1a46b1eafeae474c0c844aff4b7474287b75fe49c"}, - {file = "aiohttp-3.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:54ca74df1be3c7ca1cf7f4c971c79c2daf48d9aa65dea1a662ae18926f5bc8ce"}, - {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01948b1d570f83ee7bbf5a60ea2375a89dfb09fd419170e7f5af029510033d24"}, - {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9fc1500fd2a952c5c8e3b29aaf7e3cc6e27e9cfc0a8819b3bce48cc1b849e4cc"}, - {file = "aiohttp-3.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f614ab0c76397661b90b6851a030004dac502e48260ea10f2441abd2207fbcc7"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00819de9e45d42584bed046314c40ea7e9aea95411b38971082cad449392b08c"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05646ebe6b94cc93407b3bf34b9eb26c20722384d068eb7339de802154d61bc5"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:998f3bd3cfc95e9424a6acd7840cbdd39e45bc09ef87533c006f94ac47296090"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9010c31cd6fa59438da4e58a7f19e4753f7f264300cd152e7f90d4602449762"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ea7ffc6d6d6f8a11e6f40091a1040995cdff02cfc9ba4c2f30a516cb2633554"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:ef9c33cc5cbca35808f6c74be11eb7f5f6b14d2311be84a15b594bd3e58b5527"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ce0cdc074d540265bfeb31336e678b4e37316849d13b308607efa527e981f5c2"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:597a079284b7ee65ee102bc3a6ea226a37d2b96d0418cc9047490f231dc09fe8"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7789050d9e5d0c309c706953e5e8876e38662d57d45f936902e176d19f1c58ab"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e7f8b04d83483577fd9200461b057c9f14ced334dcb053090cea1da9c8321a91"}, - {file = "aiohttp-3.10.10-cp39-cp39-win32.whl", hash = "sha256:c02a30b904282777d872266b87b20ed8cc0d1501855e27f831320f471d54d983"}, - {file = "aiohttp-3.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:edfe3341033a6b53a5c522c802deb2079eee5cbfbb0af032a55064bd65c73a23"}, - {file = "aiohttp-3.10.10.tar.gz", hash = "sha256:0631dd7c9f0822cc61c88586ca76d5b5ada26538097d0f1df510b082bad3411a"}, + {file = "aiohttp-3.11.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8bedb1f6cb919af3b6353921c71281b1491f948ca64408871465d889b4ee1b66"}, + {file = "aiohttp-3.11.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f5022504adab881e2d801a88b748ea63f2a9d130e0b2c430824682a96f6534be"}, + {file = "aiohttp-3.11.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e22d1721c978a6494adc824e0916f9d187fa57baeda34b55140315fa2f740184"}, + {file = "aiohttp-3.11.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e993676c71288618eb07e20622572b1250d8713e7e00ab3aabae28cb70f3640d"}, + {file = "aiohttp-3.11.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e13a05db87d3b241c186d0936808d0e4e12decc267c617d54e9c643807e968b6"}, + {file = "aiohttp-3.11.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4ba8d043fed7ffa117024d7ba66fdea011c0e7602327c6d73cacaea38abe4491"}, + {file = "aiohttp-3.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dda3ed0a7869d2fa16aa41f9961ade73aa2c2e3b2fcb0a352524e7b744881889"}, + {file = "aiohttp-3.11.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43bfd25113c1e98aec6c70e26d5f4331efbf4aa9037ba9ad88f090853bf64d7f"}, + {file = "aiohttp-3.11.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3dd3e7e7c9ef3e7214f014f1ae260892286647b3cf7c7f1b644a568fd410f8ca"}, + {file = "aiohttp-3.11.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:78c657ece7a73b976905ab9ec8be9ef2df12ed8984c24598a1791c58ce3b4ce4"}, + {file = "aiohttp-3.11.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:db70a47987e34494b451a334605bee57a126fe8d290511349e86810b4be53b01"}, + {file = "aiohttp-3.11.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:9e67531370a3b07e49b280c1f8c2df67985c790ad2834d1b288a2f13cd341c5f"}, + {file = "aiohttp-3.11.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9202f184cc0582b1db15056f2225ab4c1e3dac4d9ade50dd0613ac3c46352ac2"}, + {file = "aiohttp-3.11.7-cp310-cp310-win32.whl", hash = "sha256:2257bdd5cf54a4039a4337162cd8048f05a724380a2283df34620f55d4e29341"}, + {file = "aiohttp-3.11.7-cp310-cp310-win_amd64.whl", hash = "sha256:b7215bf2b53bc6cb35808149980c2ae80a4ae4e273890ac85459c014d5aa60ac"}, + {file = "aiohttp-3.11.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cea52d11e02123f125f9055dfe0ccf1c3857225fb879e4a944fae12989e2aef2"}, + {file = "aiohttp-3.11.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3ce18f703b7298e7f7633efd6a90138d99a3f9a656cb52c1201e76cb5d79cf08"}, + {file = "aiohttp-3.11.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:670847ee6aeb3a569cd7cdfbe0c3bec1d44828bbfbe78c5d305f7f804870ef9e"}, + {file = "aiohttp-3.11.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4dda726f89bfa5c465ba45b76515135a3ece0088dfa2da49b8bb278f3bdeea12"}, + {file = "aiohttp-3.11.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c25b74a811dba37c7ea6a14d99eb9402d89c8d739d50748a75f3cf994cf19c43"}, + {file = "aiohttp-3.11.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5522ee72f95661e79db691310290c4618b86dff2d9b90baedf343fd7a08bf79"}, + {file = "aiohttp-3.11.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fbf41a6bbc319a7816ae0f0177c265b62f2a59ad301a0e49b395746eb2a9884"}, + {file = "aiohttp-3.11.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59ee1925b5a5efdf6c4e7be51deee93984d0ac14a6897bd521b498b9916f1544"}, + {file = "aiohttp-3.11.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:24054fce8c6d6f33a3e35d1c603ef1b91bbcba73e3f04a22b4f2f27dac59b347"}, + {file = "aiohttp-3.11.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:351849aca2c6f814575c1a485c01c17a4240413f960df1bf9f5deb0003c61a53"}, + {file = "aiohttp-3.11.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:12724f3a211fa243570e601f65a8831372caf1a149d2f1859f68479f07efec3d"}, + {file = "aiohttp-3.11.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:7ea4490360b605804bea8173d2d086b6c379d6bb22ac434de605a9cbce006e7d"}, + {file = "aiohttp-3.11.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e0bf378db07df0a713a1e32381a1b277e62ad106d0dbe17b5479e76ec706d720"}, + {file = "aiohttp-3.11.7-cp311-cp311-win32.whl", hash = "sha256:cd8d62cab363dfe713067027a5adb4907515861f1e4ce63e7be810b83668b847"}, + {file = "aiohttp-3.11.7-cp311-cp311-win_amd64.whl", hash = "sha256:bf0e6cce113596377cadda4e3ac5fb89f095bd492226e46d91b4baef1dd16f60"}, + {file = "aiohttp-3.11.7-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4bb7493c3e3a36d3012b8564bd0e2783259ddd7ef3a81a74f0dbfa000fce48b7"}, + {file = 
"aiohttp-3.11.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e143b0ef9cb1a2b4f74f56d4fbe50caa7c2bb93390aff52f9398d21d89bc73ea"}, + {file = "aiohttp-3.11.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f7c58a240260822dc07f6ae32a0293dd5bccd618bb2d0f36d51c5dbd526f89c0"}, + {file = "aiohttp-3.11.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d20cfe63a1c135d26bde8c1d0ea46fd1200884afbc523466d2f1cf517d1fe33"}, + {file = "aiohttp-3.11.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12e4d45847a174f77b2b9919719203769f220058f642b08504cf8b1cf185dacf"}, + {file = "aiohttp-3.11.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf4efa2d01f697a7dbd0509891a286a4af0d86902fc594e20e3b1712c28c0106"}, + {file = "aiohttp-3.11.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee6a4cdcbf54b8083dc9723cdf5f41f722c00db40ccf9ec2616e27869151129"}, + {file = "aiohttp-3.11.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6095aaf852c34f42e1bd0cf0dc32d1e4b48a90bfb5054abdbb9d64b36acadcb"}, + {file = "aiohttp-3.11.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1cf03d27885f8c5ebf3993a220cc84fc66375e1e6e812731f51aab2b2748f4a6"}, + {file = "aiohttp-3.11.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1a17f6a230f81eb53282503823f59d61dff14fb2a93847bf0399dc8e87817307"}, + {file = "aiohttp-3.11.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:481f10a1a45c5f4c4a578bbd74cff22eb64460a6549819242a87a80788461fba"}, + {file = "aiohttp-3.11.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:db37248535d1ae40735d15bdf26ad43be19e3d93ab3f3dad8507eb0f85bb8124"}, + {file = "aiohttp-3.11.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d18a8b44ec8502a7fde91446cd9c9b95ce7c49f1eacc1fb2358b8907d4369fd"}, + {file = "aiohttp-3.11.7-cp312-cp312-win32.whl", hash = 
"sha256:3d1c9c15d3999107cbb9b2d76ca6172e6710a12fda22434ee8bd3f432b7b17e8"}, + {file = "aiohttp-3.11.7-cp312-cp312-win_amd64.whl", hash = "sha256:018f1b04883a12e77e7fc161934c0f298865d3a484aea536a6a2ca8d909f0ba0"}, + {file = "aiohttp-3.11.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:241a6ca732d2766836d62c58c49ca7a93d08251daef0c1e3c850df1d1ca0cbc4"}, + {file = "aiohttp-3.11.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:aa3705a8d14de39898da0fbad920b2a37b7547c3afd2a18b9b81f0223b7d0f68"}, + {file = "aiohttp-3.11.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9acfc7f652b31853eed3b92095b0acf06fd5597eeea42e939bd23a17137679d5"}, + {file = "aiohttp-3.11.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcefcf2915a2dbdbce37e2fc1622129a1918abfe3d06721ce9f6cdac9b6d2eaa"}, + {file = "aiohttp-3.11.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c1f6490dd1862af5aae6cfcf2a274bffa9a5b32a8f5acb519a7ecf5a99a88866"}, + {file = "aiohttp-3.11.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac5462582d6561c1c1708853a9faf612ff4e5ea5e679e99be36143d6eabd8e"}, + {file = "aiohttp-3.11.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1a6309005acc4b2bcc577ba3b9169fea52638709ffacbd071f3503264620da"}, + {file = "aiohttp-3.11.7-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5b973cce96793725ef63eb449adfb74f99c043c718acb76e0d2a447ae369962"}, + {file = "aiohttp-3.11.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ce91a24aac80de6be8512fb1c4838a9881aa713f44f4e91dd7bb3b34061b497d"}, + {file = "aiohttp-3.11.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:875f7100ce0e74af51d4139495eec4025affa1a605280f23990b6434b81df1bd"}, + {file = "aiohttp-3.11.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c171fc35d3174bbf4787381716564042a4cbc008824d8195eede3d9b938e29a8"}, + {file = 
"aiohttp-3.11.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ee9afa1b0d2293c46954f47f33e150798ad68b78925e3710044e0d67a9487791"}, + {file = "aiohttp-3.11.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8360c7cc620abb320e1b8d603c39095101391a82b1d0be05fb2225471c9c5c52"}, + {file = "aiohttp-3.11.7-cp313-cp313-win32.whl", hash = "sha256:7a9318da4b4ada9a67c1dd84d1c0834123081e746bee311a16bb449f363d965e"}, + {file = "aiohttp-3.11.7-cp313-cp313-win_amd64.whl", hash = "sha256:fc6da202068e0a268e298d7cd09b6e9f3997736cd9b060e2750963754552a0a9"}, + {file = "aiohttp-3.11.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:17829f37c0d31d89aa6b8b010475a10233774771f9b6dc2cc352ea4f8ce95d9a"}, + {file = "aiohttp-3.11.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d6177077a31b1aecfc3c9070bd2f11419dbb4a70f30f4c65b124714f525c2e48"}, + {file = "aiohttp-3.11.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:badda65ac99555791eed75e234afb94686ed2317670c68bff8a4498acdaee935"}, + {file = "aiohttp-3.11.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0de6466b9d742b4ee56fe1b2440706e225eb48c77c63152b1584864a236e7a50"}, + {file = "aiohttp-3.11.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04b0cc74d5a882c9dacaeeccc1444f0233212b6f5be8bc90833feef1e1ce14b9"}, + {file = "aiohttp-3.11.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c7af3e50e5903d21d7b935aceed901cc2475463bc16ddd5587653548661fdb"}, + {file = "aiohttp-3.11.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c63f898f683d1379b9be5afc3dd139e20b30b0b1e0bf69a3fc3681f364cf1629"}, + {file = "aiohttp-3.11.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdadc3f6a32d6eca45f9a900a254757fd7855dfb2d8f8dcf0e88f0fae3ff8eb1"}, + {file = "aiohttp-3.11.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:d329300fb23e14ed1f8c6d688dfd867d1dcc3b1d7cd49b7f8c5b44e797ce0932"}, + {file = "aiohttp-3.11.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5578cf40440eafcb054cf859964bc120ab52ebe0e0562d2b898126d868749629"}, + {file = "aiohttp-3.11.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:7b2f8107a3c329789f3c00b2daad0e35f548d0a55cda6291579136622099a46e"}, + {file = "aiohttp-3.11.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:43dd89a6194f6ab02a3fe36b09e42e2df19c211fc2050ce37374d96f39604997"}, + {file = "aiohttp-3.11.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d2fa6fc7cc865d26ff42480ac9b52b8c9b7da30a10a6442a9cdf429de840e949"}, + {file = "aiohttp-3.11.7-cp39-cp39-win32.whl", hash = "sha256:a7d9a606355655617fee25dd7e54d3af50804d002f1fd3118dd6312d26692d70"}, + {file = "aiohttp-3.11.7-cp39-cp39-win_amd64.whl", hash = "sha256:53c921b58fdc6485d6b2603e0132bb01cd59b8f0620ffc0907f525e0ba071687"}, + {file = "aiohttp-3.11.7.tar.gz", hash = "sha256:01a8aca4af3da85cea5c90141d23f4b0eee3cbecfd33b029a45a80f28c66c668"}, ] [package.dependencies] aiohappyeyeballs = ">=2.3.0" aiosignal = ">=1.1.2" -async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" -yarl = ">=1.12.0,<2.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] @@ -627,13 +613,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -709,84 +695,84 @@ files = [ [[package]] name = "jiter" -version = "0.6.1" +version = "0.7.1" description = "Fast iterable JSON parser." optional = false python-versions = ">=3.8" files = [ - {file = "jiter-0.6.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d08510593cb57296851080018006dfc394070178d238b767b1879dc1013b106c"}, - {file = "jiter-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adef59d5e2394ebbad13b7ed5e0306cceb1df92e2de688824232a91588e77aa7"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3e02f7a27f2bcc15b7d455c9df05df8ffffcc596a2a541eeda9a3110326e7a3"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed69a7971d67b08f152c17c638f0e8c2aa207e9dd3a5fcd3cba294d39b5a8d2d"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2019d966e98f7c6df24b3b8363998575f47d26471bfb14aade37630fae836a1"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36c0b51a285b68311e207a76c385650322734c8717d16c2eb8af75c9d69506e7"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:220e0963b4fb507c525c8f58cde3da6b1be0bfddb7ffd6798fb8f2531226cdb1"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:aa25c7a9bf7875a141182b9c95aed487add635da01942ef7ca726e42a0c09058"}, - {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e90552109ca8ccd07f47ca99c8a1509ced93920d271bb81780a973279974c5ab"}, - {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:67723a011964971864e0b484b0ecfee6a14de1533cff7ffd71189e92103b38a8"}, - {file = "jiter-0.6.1-cp310-none-win32.whl", hash = "sha256:33af2b7d2bf310fdfec2da0177eab2fedab8679d1538d5b86a633ebfbbac4edd"}, - {file = "jiter-0.6.1-cp310-none-win_amd64.whl", hash = "sha256:7cea41c4c673353799906d940eee8f2d8fd1d9561d734aa921ae0f75cb9732f4"}, - {file = "jiter-0.6.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b03c24e7da7e75b170c7b2b172d9c5e463aa4b5c95696a368d52c295b3f6847f"}, - {file = "jiter-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47fee1be677b25d0ef79d687e238dc6ac91a8e553e1a68d0839f38c69e0ee491"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0d2f6e01a8a0fb0eab6d0e469058dab2be46ff3139ed2d1543475b5a1d8e7"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b809e39e342c346df454b29bfcc7bca3d957f5d7b60e33dae42b0e5ec13e027"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9ac7c2f092f231f5620bef23ce2e530bd218fc046098747cc390b21b8738a7a"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e51a2d80d5fe0ffb10ed2c82b6004458be4a3f2b9c7d09ed85baa2fbf033f54b"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3343d4706a2b7140e8bd49b6c8b0a82abf9194b3f0f5925a78fc69359f8fc33c"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82521000d18c71e41c96960cb36e915a357bc83d63a8bed63154b89d95d05ad1"}, - {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:3c843e7c1633470708a3987e8ce617ee2979ee18542d6eb25ae92861af3f1d62"}, - {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2e861658c3fe849efc39b06ebb98d042e4a4c51a8d7d1c3ddc3b1ea091d0784"}, - {file = "jiter-0.6.1-cp311-none-win32.whl", hash = "sha256:7d72fc86474862c9c6d1f87b921b70c362f2b7e8b2e3c798bb7d58e419a6bc0f"}, - {file = "jiter-0.6.1-cp311-none-win_amd64.whl", hash = "sha256:3e36a320634f33a07794bb15b8da995dccb94f944d298c8cfe2bd99b1b8a574a"}, - {file = "jiter-0.6.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1fad93654d5a7dcce0809aff66e883c98e2618b86656aeb2129db2cd6f26f867"}, - {file = "jiter-0.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4e6e340e8cd92edab7f6a3a904dbbc8137e7f4b347c49a27da9814015cc0420c"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:691352e5653af84ed71763c3c427cff05e4d658c508172e01e9c956dfe004aba"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:defee3949313c1f5b55e18be45089970cdb936eb2a0063f5020c4185db1b63c9"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26d2bdd5da097e624081c6b5d416d3ee73e5b13f1703bcdadbb1881f0caa1933"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18aa9d1626b61c0734b973ed7088f8a3d690d0b7f5384a5270cd04f4d9f26c86"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3567c8228afa5ddcce950631c6b17397ed178003dc9ee7e567c4c4dcae9fa0"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c0507131c922defe3f04c527d6838932fcdfd69facebafd7d3574fa3395314"}, - {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:540fcb224d7dc1bcf82f90f2ffb652df96f2851c031adca3c8741cb91877143b"}, - {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2"}, - {file = "jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0"}, - {file = "jiter-0.6.1-cp312-none-win_amd64.whl", hash = "sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6"}, - {file = "jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31"}, - {file = "jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a"}, - {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71"}, - {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991"}, - {file = "jiter-0.6.1-cp313-none-win32.whl", hash = 
"sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4"}, - {file = "jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda"}, - {file = "jiter-0.6.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:31d8e00e1fb4c277df8ab6f31a671f509ebc791a80e5c61fdc6bc8696aaa297c"}, - {file = "jiter-0.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77c296d65003cd7ee5d7b0965f6acbe6cffaf9d1fa420ea751f60ef24e85fed5"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeeb0c0325ef96c12a48ea7e23e2e86fe4838e6e0a995f464cf4c79fa791ceeb"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a31c6fcbe7d6c25d6f1cc6bb1cba576251d32795d09c09961174fe461a1fb5bd"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59e2b37f3b9401fc9e619f4d4badcab2e8643a721838bcf695c2318a0475ae42"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bae5ae4853cb9644144e9d0755854ce5108d470d31541d83f70ca7ecdc2d1637"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df588e9c830b72d8db1dd7d0175af6706b0904f682ea9b1ca8b46028e54d6e9"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15f8395e835cf561c85c1adee72d899abf2733d9df72e9798e6d667c9b5c1f30"}, - {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a99d4e0b5fc3b05ea732d67eb2092fe894e95a90e6e413f2ea91387e228a307"}, - {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a311df1fa6be0ccd64c12abcd85458383d96e542531bafbfc0a16ff6feda588f"}, - {file = "jiter-0.6.1-cp38-none-win32.whl", hash = "sha256:81116a6c272a11347b199f0e16b6bd63f4c9d9b52bc108991397dd80d3c78aba"}, - {file = "jiter-0.6.1-cp38-none-win_amd64.whl", hash = 
"sha256:13f9084e3e871a7c0b6e710db54444088b1dd9fbefa54d449b630d5e73bb95d0"}, - {file = "jiter-0.6.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f1c53615fcfec3b11527c08d19cff6bc870da567ce4e57676c059a3102d3a082"}, - {file = "jiter-0.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f791b6a4da23238c17a81f44f5b55d08a420c5692c1fda84e301a4b036744eb1"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c97e90fec2da1d5f68ef121444c2c4fa72eabf3240829ad95cf6bbeca42a301"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3cbc1a66b4e41511209e97a2866898733c0110b7245791ac604117b7fb3fedb7"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4e85f9e12cd8418ab10e1fcf0e335ae5bb3da26c4d13a0fd9e6a17a674783b6"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08be33db6dcc374c9cc19d3633af5e47961a7b10d4c61710bd39e48d52a35824"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:677be9550004f5e010d673d3b2a2b815a8ea07a71484a57d3f85dde7f14cf132"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8bd065be46c2eecc328e419d6557bbc37844c88bb07b7a8d2d6c91c7c4dedc9"}, - {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bd95375ce3609ec079a97c5d165afdd25693302c071ca60c7ae1cf826eb32022"}, - {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db459ed22d0208940d87f614e1f0ea5a946d29a3cfef71f7e1aab59b6c6b2afb"}, - {file = "jiter-0.6.1-cp39-none-win32.whl", hash = "sha256:d71c962f0971347bd552940ab96aa42ceefcd51b88c4ced8a27398182efa8d80"}, - {file = "jiter-0.6.1-cp39-none-win_amd64.whl", hash = "sha256:d465db62d2d10b489b7e7a33027c4ae3a64374425d757e963f86df5b5f2e7fc5"}, - {file = "jiter-0.6.1.tar.gz", hash = 
"sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"}, + {file = "jiter-0.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:262e96d06696b673fad6f257e6a0abb6e873dc22818ca0e0600f4a1189eb334f"}, + {file = "jiter-0.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be6de02939aac5be97eb437f45cfd279b1dc9de358b13ea6e040e63a3221c40d"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935f10b802bc1ce2b2f61843e498c7720aa7f4e4bb7797aa8121eab017293c3d"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9cd3cccccabf5064e4bb3099c87bf67db94f805c1e62d1aefd2b7476e90e0ee2"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4aa919ebfc5f7b027cc368fe3964c0015e1963b92e1db382419dadb098a05192"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ae2d01e82c94491ce4d6f461a837f63b6c4e6dd5bb082553a70c509034ff3d4"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f9568cd66dbbdab67ae1b4c99f3f7da1228c5682d65913e3f5f95586b3cb9a9"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ecbf4e20ec2c26512736284dc1a3f8ed79b6ca7188e3b99032757ad48db97dc"}, + {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1a0508fddc70ce00b872e463b387d49308ef02b0787992ca471c8d4ba1c0fa1"}, + {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f84c9996664c460f24213ff1e5881530abd8fafd82058d39af3682d5fd2d6316"}, + {file = "jiter-0.7.1-cp310-none-win32.whl", hash = "sha256:c915e1a1960976ba4dfe06551ea87063b2d5b4d30759012210099e712a414d9f"}, + {file = "jiter-0.7.1-cp310-none-win_amd64.whl", hash = "sha256:75bf3b7fdc5c0faa6ffffcf8028a1f974d126bac86d96490d1b51b3210aa0f3f"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:ad04a23a91f3d10d69d6c87a5f4471b61c2c5cd6e112e85136594a02043f462c"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e47a554de88dff701226bb5722b7f1b6bccd0b98f1748459b7e56acac2707a5"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e44fff69c814a2e96a20b4ecee3e2365e9b15cf5fe4e00869d18396daa91dab"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df0a1d05081541b45743c965436f8b5a1048d6fd726e4a030113a2699a6046ea"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f22cf8f236a645cb6d8ffe2a64edb5d2b66fb148bf7c75eea0cb36d17014a7bc"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8589f50b728ea4bf22e0632eefa125c8aa9c38ed202a5ee6ca371f05eeb3ff"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f20de711224f2ca2dbb166a8d512f6ff48c9c38cc06b51f796520eb4722cc2ce"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a9803396032117b85ec8cbf008a54590644a062fedd0425cbdb95e4b2b60479"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d8bae77c82741032e9d89a4026479061aba6e646de3bf5f2fc1ae2bbd9d06e0"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dc9939e576bbc68c813fc82f6620353ed68c194c7bcf3d58dc822591ec12490"}, + {file = "jiter-0.7.1-cp311-none-win32.whl", hash = "sha256:f7605d24cd6fab156ec89e7924578e21604feee9c4f1e9da34d8b67f63e54892"}, + {file = "jiter-0.7.1-cp311-none-win_amd64.whl", hash = "sha256:f3ea649e7751a1a29ea5ecc03c4ada0a833846c59c6da75d747899f9b48b7282"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ad36a1155cbd92e7a084a568f7dc6023497df781adf2390c345dd77a120905ca"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:7ba52e6aaed2dc5c81a3d9b5e4ab95b039c4592c66ac973879ba57c3506492bb"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7de0b6f6728b678540c7927587e23f715284596724be203af952418acb8a2d"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9463b62bd53c2fb85529c700c6a3beb2ee54fde8bef714b150601616dcb184a6"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:627164ec01d28af56e1f549da84caf0fe06da3880ebc7b7ee1ca15df106ae172"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25d0e5bf64e368b0aa9e0a559c3ab2f9b67e35fe7269e8a0d81f48bbd10e8963"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c244261306f08f8008b3087059601997016549cb8bb23cf4317a4827f07b7d74"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ded4e4b75b68b843b7cea5cd7c55f738c20e1394c68c2cb10adb655526c5f1b"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:80dae4f1889b9d09e5f4de6b58c490d9c8ce7730e35e0b8643ab62b1538f095c"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5970cf8ec943b51bce7f4b98d2e1ed3ada170c2a789e2db3cb484486591a176a"}, + {file = "jiter-0.7.1-cp312-none-win32.whl", hash = "sha256:701d90220d6ecb3125d46853c8ca8a5bc158de8c49af60fd706475a49fee157e"}, + {file = "jiter-0.7.1-cp312-none-win_amd64.whl", hash = "sha256:7824c3ecf9ecf3321c37f4e4d4411aad49c666ee5bc2a937071bdd80917e4533"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:097676a37778ba3c80cb53f34abd6943ceb0848263c21bf423ae98b090f6c6ba"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3298af506d4271257c0a8f48668b0f47048d69351675dd8500f22420d4eec378"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:12fd88cfe6067e2199964839c19bd2b422ca3fd792949b8f44bb8a4e7d21946a"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dacca921efcd21939123c8ea8883a54b9fa7f6545c8019ffcf4f762985b6d0c8"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de3674a5fe1f6713a746d25ad9c32cd32fadc824e64b9d6159b3b34fd9134143"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65df9dbae6d67e0788a05b4bad5706ad40f6f911e0137eb416b9eead6ba6f044"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ba9a358d59a0a55cccaa4957e6ae10b1a25ffdabda863c0343c51817610501d"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:576eb0f0c6207e9ede2b11ec01d9c2182973986514f9c60bc3b3b5d5798c8f50"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e550e29cdf3577d2c970a18f3959e6b8646fd60ef1b0507e5947dc73703b5627"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:81d968dbf3ce0db2e0e4dec6b0a0d5d94f846ee84caf779b07cab49f5325ae43"}, + {file = "jiter-0.7.1-cp313-none-win32.whl", hash = "sha256:f892e547e6e79a1506eb571a676cf2f480a4533675f834e9ae98de84f9b941ac"}, + {file = "jiter-0.7.1-cp313-none-win_amd64.whl", hash = "sha256:0302f0940b1455b2a7fb0409b8d5b31183db70d2b07fd177906d83bf941385d1"}, + {file = "jiter-0.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c65a3ce72b679958b79d556473f192a4dfc5895e8cc1030c9f4e434690906076"}, + {file = "jiter-0.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e80052d3db39f9bb8eb86d207a1be3d9ecee5e05fdec31380817f9609ad38e60"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a497859c4f3f7acd71c8bd89a6f9cf753ebacacf5e3e799138b8e1843084e3"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash 
= "sha256:c1288bc22b9e36854a0536ba83666c3b1fb066b811019d7b682c9cf0269cdf9f"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b096ca72dd38ef35675e1d3b01785874315182243ef7aea9752cb62266ad516f"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dbbd52c50b605af13dbee1a08373c520e6fcc6b5d32f17738875847fea4e2cd"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af29c5c6eb2517e71ffa15c7ae9509fa5e833ec2a99319ac88cc271eca865519"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f114a4df1e40c03c0efbf974b376ed57756a1141eb27d04baee0680c5af3d424"}, + {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:191fbaee7cf46a9dd9b817547bf556facde50f83199d07fc48ebeff4082f9df4"}, + {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e2b445e5ee627fb4ee6bbceeb486251e60a0c881a8e12398dfdff47c56f0723"}, + {file = "jiter-0.7.1-cp38-none-win32.whl", hash = "sha256:47ac4c3cf8135c83e64755b7276339b26cd3c7ddadf9e67306ace4832b283edf"}, + {file = "jiter-0.7.1-cp38-none-win_amd64.whl", hash = "sha256:60b49c245cd90cde4794f5c30f123ee06ccf42fb8730a019a2870cd005653ebd"}, + {file = "jiter-0.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8f212eeacc7203256f526f550d105d8efa24605828382cd7d296b703181ff11d"}, + {file = "jiter-0.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d9e247079d88c00e75e297e6cb3a18a039ebcd79fefc43be9ba4eb7fb43eb726"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0aacaa56360139c53dcf352992b0331f4057a0373bbffd43f64ba0c32d2d155"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc1b55314ca97dbb6c48d9144323896e9c1a25d41c65bcb9550b3e0c270ca560"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f281aae41b47e90deb70e7386558e877a8e62e1693e0086f37d015fa1c102289"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93c20d2730a84d43f7c0b6fb2579dc54335db742a59cf9776d0b80e99d587382"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81ccccd8069110e150613496deafa10da2f6ff322a707cbec2b0d52a87b9671"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a7d5e85766eff4c9be481d77e2226b4c259999cb6862ccac5ef6621d3c8dcce"}, + {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f52ce5799df5b6975439ecb16b1e879d7655e1685b6e3758c9b1b97696313bfb"}, + {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e0c91a0304373fdf97d56f88356a010bba442e6d995eb7773cbe32885b71cdd8"}, + {file = "jiter-0.7.1-cp39-none-win32.whl", hash = "sha256:5c08adf93e41ce2755970e8aa95262298afe2bf58897fb9653c47cd93c3c6cdc"}, + {file = "jiter-0.7.1-cp39-none-win_amd64.whl", hash = "sha256:6592f4067c74176e5f369228fb2995ed01400c9e8e1225fb73417183a5e635f0"}, + {file = "jiter-0.7.1.tar.gz", hash = "sha256:448cf4f74f7363c34cdef26214da527e8eeffd88ba06d0b80b485ad0667baf5d"}, ] [[package]] @@ -816,7 +802,7 @@ files = [ [[package]] name = "langchain" -version = "0.3.4" +version = "0.3.7" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -826,12 +812,12 @@ develop = false [package.dependencies] aiohttp = "^3.8.3" async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""} -langchain-core = "^0.3.12" +langchain-core = "^0.3.15" langchain-text-splitters = "^0.3.0" langsmith = "^0.1.17" numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.2,<2", markers = "python_version >= 
\"3.12\""}, ] pydantic = "^2.7.4" PyYAML = ">=5.3" @@ -843,12 +829,12 @@ tenacity = ">=8.1.0,!=8.4.0,<10" type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "0606aabfa39acb2ec575ea8bbfa4c8e662a6134f" +resolved_reference = "f173b72e35979b842933774c9c4568c329a0ae8a" subdirectory = "libs/langchain" [[package]] name = "langchain-community" -version = "0.3.3" +version = "0.3.7" description = "Community contributed LangChain integrations." optional = false python-versions = ">=3.9,<4.0" @@ -859,29 +845,29 @@ develop = false aiohttp = "^3.8.3" dataclasses-json = ">= 0.5.7, < 0.7" httpx-sse = "^0.4.0" -langchain = "^0.3.4" -langchain-core = "^0.3.12" +langchain = "^0.3.7" +langchain-core = "^0.3.17" langsmith = "^0.1.125" numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.2,<2", markers = "python_version >= \"3.12\""}, ] pydantic-settings = "^2.4.0" PyYAML = ">=5.3" requests = "^2" -SQLAlchemy = ">=1.4,<3" +SQLAlchemy = ">=1.4,<2.0.36" tenacity = ">=8.1.0,!=8.4.0,<10" [package.source] type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "0606aabfa39acb2ec575ea8bbfa4c8e662a6134f" +resolved_reference = "f173b72e35979b842933774c9c4568c329a0ae8a" subdirectory = "libs/community" [[package]] name = "langchain-core" -version = "0.3.13" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -904,12 +890,12 @@ typing-extensions = ">=4.7" type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "0606aabfa39acb2ec575ea8bbfa4c8e662a6134f" +resolved_reference = "f173b72e35979b842933774c9c4568c329a0ae8a" subdirectory = "libs/core" [[package]] name = 
"langchain-openai" -version = "0.2.4" +version = "0.2.9" description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = ">=3.9,<4.0" @@ -917,20 +903,20 @@ files = [] develop = false [package.dependencies] -langchain-core = "^0.3.13" -openai = "^1.52.0" +langchain-core = "^0.3.17" +openai = "^1.54.0" tiktoken = ">=0.7,<1" [package.source] type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "0606aabfa39acb2ec575ea8bbfa4c8e662a6134f" +resolved_reference = "f173b72e35979b842933774c9c4568c329a0ae8a" subdirectory = "libs/partners/openai" [[package]] name = "langchain-text-splitters" -version = "0.3.0" +version = "0.3.2" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.9,<4.0" @@ -938,29 +924,29 @@ files = [] develop = false [package.dependencies] -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" [package.source] type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "0606aabfa39acb2ec575ea8bbfa4c8e662a6134f" +resolved_reference = "f173b72e35979b842933774c9c4568c329a0ae8a" subdirectory = "libs/text-splitters" [[package]] name = "langsmith" -version = "0.1.137" +version = "0.1.144" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.137-py3-none-any.whl", hash = "sha256:4256d5c61133749890f7b5c88321dbb133ce0f440c621ea28e76513285859b81"}, - {file = "langsmith-0.1.137.tar.gz", hash = "sha256:56cdfcc6c74cb20a3f437d5bd144feb5bf93f54c5a2918d1e568cbd084a372d4"}, + {file = "langsmith-0.1.144-py3-none-any.whl", hash = "sha256:08ffb975bff2e82fc6f5428837c64c074ea25102d08a25e256361a80812c6100"}, + {file = "langsmith-0.1.144.tar.gz", hash = "sha256:b621f358d5a33441d7b5e7264c376bf4ea82bfc62d7e41aafc0f8094e3bd6369"}, ] [package.dependencies] httpx = ">=0.23.0,<1" -orjson = ">=3.9.14,<4.0.0" +orjson = {version = ">=3.9.14,<4.0.0", markers = "platform_python_implementation != \"PyPy\""} pydantic = [ {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, @@ -970,13 +956,13 @@ requests-toolbelt = ">=1.0.0,<2.0.0" [[package]] name = "marshmallow" -version = "3.23.0" +version = "3.23.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.9" files = [ - {file = "marshmallow-3.23.0-py3-none-any.whl", hash = "sha256:82f20a2397834fe6d9611b241f2f7e7b680ed89c49f84728a1ad937be6b4bdf4"}, - {file = "marshmallow-3.23.0.tar.gz", hash = "sha256:98d8827a9f10c03d44ead298d2e99c6aea8197df18ccfad360dae7f89a50da2e"}, + {file = "marshmallow-3.23.1-py3-none-any.whl", hash = "sha256:fece2eb2c941180ea1b7fcbd4a83c51bfdd50093fdd3ad2585ee5e1df2508491"}, + {file = "marshmallow-3.23.1.tar.gz", hash = "sha256:3a8dfda6edd8dcdbf216c0ede1d1e78d230a6dc9c5a088f58c4083b974a0d468"}, ] [package.dependencies] @@ -984,18 +970,18 @@ packaging = ">=17.0" [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] -docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)", "sphinx-version-warning (==1.1.2)"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.14)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)", "sphinx-version-warning (==1.1.2)"] tests = ["pytest", "simplejson"] [[package]] name = "mongomock" -version = "4.2.0.post1" +version = "4.3.0" description = "Fake pymongo stub for testing simple MongoDB-dependent code" optional = false python-versions = "*" files = [ - {file = "mongomock-4.2.0.post1-py2.py3-none-any.whl", hash = "sha256:ff78f1944bf0cdcfc291ece198357db805c2f0db39e814bcef8a43c9f53e8a81"}, - {file = "mongomock-4.2.0.post1.tar.gz", hash = "sha256:9241d2cec7274b9736dbe8edacb19528ff66af3b3779b324d79ecc4201227f31"}, + {file = "mongomock-4.3.0-py2.py3-none-any.whl", hash = "sha256:5ef86bd12fc8806c6e7af32f21266c61b6c4ba96096f85129852d1c4fec1327e"}, + {file = "mongomock-4.3.0.tar.gz", hash = "sha256:32667b79066fabc12d4f17f16a8fd7361b5f4435208b3ba32c226e52212a8c30"}, ] [package.dependencies] @@ -1246,13 +1232,13 @@ files = [ [[package]] name = "openai" -version = "1.52.2" +version = "1.55.0" description = "The official Python library for the openai API" optional = false -python-versions = ">=3.7.1" +python-versions = 
">=3.8" files = [ - {file = "openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc"}, - {file = "openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d"}, + {file = "openai-1.55.0-py3-none-any.whl", hash = "sha256:446e08918f8dd70d8723274be860404c8c7cc46b91b93bbc0ef051f57eb503c1"}, + {file = "openai-1.55.0.tar.gz", hash = "sha256:6c0975ac8540fe639d12b4ff5a8e0bf1424c844c4a4251148f59f06c4b2bd5db"}, ] [package.dependencies] @@ -1270,80 +1256,80 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.10" +version = "3.10.11" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"}, - {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"}, - {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"}, - {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"}, - {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"}, - {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"}, - {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"}, - {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"}, - {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"}, - {file = 
"orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"}, - {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"}, - {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"}, - {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"}, - {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"}, - {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"}, - {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"}, - {file = 
"orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"}, - {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"}, - {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"}, - {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"}, - {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"}, - {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"}, - {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"}, - {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"}, - {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"}, - {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"}, - {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"}, - {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"}, - {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"}, - {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"}, - {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"}, - {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = 
"sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"}, - {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"}, + {file = "orjson-3.10.11-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6dade64687f2bd7c090281652fe18f1151292d567a9302b34c2dbb92a3872f1f"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82f07c550a6ccd2b9290849b22316a609023ed851a87ea888c0456485a7d196a"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd9a187742d3ead9df2e49240234d728c67c356516cf4db018833a86f20ec18c"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77b0fed6f209d76c1c39f032a70df2d7acf24b1812ca3e6078fd04e8972685a3"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63fc9d5fe1d4e8868f6aae547a7b8ba0a2e592929245fff61d633f4caccdcdd6"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65cd3e3bb4fbb4eddc3c1e8dce10dc0b73e808fcb875f9fab40c81903dd9323e"}, + {file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f67c570602300c4befbda12d153113b8974a3340fdcf3d6de095ede86c06d92"}, + {file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1f39728c7f7d766f1f5a769ce4d54b5aaa4c3f92d5b84817053cc9995b977acc"}, + {file = "orjson-3.10.11-cp310-none-win32.whl", hash = "sha256:1789d9db7968d805f3d94aae2c25d04014aae3a2fa65b1443117cd462c6da647"}, + {file = "orjson-3.10.11-cp310-none-win_amd64.whl", hash = "sha256:5576b1e5a53a5ba8f8df81872bb0878a112b3ebb1d392155f00f54dd86c83ff6"}, + {file = "orjson-3.10.11-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1444f9cb7c14055d595de1036f74ecd6ce15f04a715e73f33bb6326c9cef01b6"}, + {file = 
"orjson-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdec57fe3b4bdebcc08a946db3365630332dbe575125ff3d80a3272ebd0ddafe"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eed32f33a0ea6ef36ccc1d37f8d17f28a1d6e8eefae5928f76aff8f1df85e67"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80df27dd8697242b904f4ea54820e2d98d3f51f91e97e358fc13359721233e4b"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:705f03cee0cb797256d54de6695ef219e5bc8c8120b6654dd460848d57a9af3d"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03246774131701de8e7059b2e382597da43144a9a7400f178b2a32feafc54bd5"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8b5759063a6c940a69c728ea70d7c33583991c6982915a839c8da5f957e0103a"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:677f23e32491520eebb19c99bb34675daf5410c449c13416f7f0d93e2cf5f981"}, + {file = "orjson-3.10.11-cp311-none-win32.whl", hash = "sha256:a11225d7b30468dcb099498296ffac36b4673a8398ca30fdaec1e6c20df6aa55"}, + {file = "orjson-3.10.11-cp311-none-win_amd64.whl", hash = "sha256:df8c677df2f9f385fcc85ab859704045fa88d4668bc9991a527c86e710392bec"}, + {file = "orjson-3.10.11-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:360a4e2c0943da7c21505e47cf6bd725588962ff1d739b99b14e2f7f3545ba51"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:496e2cb45de21c369079ef2d662670a4892c81573bcc143c4205cae98282ba97"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7dfa8db55c9792d53c5952900c6a919cfa377b4f4534c7a786484a6a4a350c19"}, + {file = 
"orjson-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51f3382415747e0dbda9dade6f1e1a01a9d37f630d8c9049a8ed0e385b7a90c0"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f35a1b9f50a219f470e0e497ca30b285c9f34948d3c8160d5ad3a755d9299433"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f3b7c5803138e67028dde33450e054c87e0703afbe730c105f1fcd873496d5"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f91d9eb554310472bd09f5347950b24442600594c2edc1421403d7610a0998fd"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dfbb2d460a855c9744bbc8e36f9c3a997c4b27d842f3d5559ed54326e6911f9b"}, + {file = "orjson-3.10.11-cp312-none-win32.whl", hash = "sha256:d4a62c49c506d4d73f59514986cadebb7e8d186ad510c518f439176cf8d5359d"}, + {file = "orjson-3.10.11-cp312-none-win_amd64.whl", hash = "sha256:f1eec3421a558ff7a9b010a6c7effcfa0ade65327a71bb9b02a1c3b77a247284"}, + {file = "orjson-3.10.11-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c46294faa4e4d0eb73ab68f1a794d2cbf7bab33b1dda2ac2959ffb7c61591899"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52e5834d7d6e58a36846e059d00559cb9ed20410664f3ad156cd2cc239a11230"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2fc947e5350fdce548bfc94f434e8760d5cafa97fb9c495d2fef6757aa02ec0"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0efabbf839388a1dab5b72b5d3baedbd6039ac83f3b55736eb9934ea5494d258"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a3f29634260708c200c4fe148e42b4aae97d7b9fee417fbdd74f8cfc265f15b0"}, + {file = "orjson-3.10.11-cp313-none-win32.whl", hash = 
"sha256:1a1222ffcee8a09476bbdd5d4f6f33d06d0d6642df2a3d78b7a195ca880d669b"}, + {file = "orjson-3.10.11-cp313-none-win_amd64.whl", hash = "sha256:bc274ac261cc69260913b2d1610760e55d3c0801bb3457ba7b9004420b6b4270"}, + {file = "orjson-3.10.11-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:19b3763e8bbf8ad797df6b6b5e0fc7c843ec2e2fc0621398534e0c6400098f87"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1be83a13312e5e58d633580c5eb8d0495ae61f180da2722f20562974188af205"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:afacfd1ab81f46dedd7f6001b6d4e8de23396e4884cd3c3436bd05defb1a6446"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb4d0bea56bba596723d73f074c420aec3b2e5d7d30698bc56e6048066bd560c"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96ed1de70fcb15d5fed529a656df29f768187628727ee2788344e8a51e1c1350"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bfb30c891b530f3f80e801e3ad82ef150b964e5c38e1fb8482441c69c35c61c"}, + {file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d496c74fc2b61341e3cefda7eec21b7854c5f672ee350bc55d9a4997a8a95204"}, + {file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:655a493bac606655db9a47fe94d3d84fc7f3ad766d894197c94ccf0c5408e7d3"}, + {file = "orjson-3.10.11-cp38-none-win32.whl", hash = "sha256:b9546b278c9fb5d45380f4809e11b4dd9844ca7aaf1134024503e134ed226161"}, + {file = "orjson-3.10.11-cp38-none-win_amd64.whl", hash = "sha256:b592597fe551d518f42c5a2eb07422eb475aa8cfdc8c51e6da7054b836b26782"}, + {file = "orjson-3.10.11-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95f2ecafe709b4e5c733b5e2768ac569bed308623c85806c395d9cca00e08af"}, + {file = 
"orjson-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80c00d4acded0c51c98754fe8218cb49cb854f0f7eb39ea4641b7f71732d2cb7"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:461311b693d3d0a060439aa669c74f3603264d4e7a08faa68c47ae5a863f352d"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52ca832f17d86a78cbab86cdc25f8c13756ebe182b6fc1a97d534051c18a08de"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c57ea78a753812f528178aa2f1c57da633754c91d2124cb28991dab4c79a54"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7fcfc6f7ca046383fb954ba528587e0f9336828b568282b27579c49f8e16aad"}, + {file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:86b9dd983857970c29e4c71bb3e95ff085c07d3e83e7c46ebe959bac07ebd80b"}, + {file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d83f87582d223e54efb2242a79547611ba4ebae3af8bae1e80fa9a0af83bb7f"}, + {file = "orjson-3.10.11-cp39-none-win32.whl", hash = "sha256:9fd0ad1c129bc9beb1154c2655f177620b5beaf9a11e0d10bac63ef3fce96950"}, + {file = "orjson-3.10.11-cp39-none-win_amd64.whl", hash = "sha256:10f416b2a017c8bd17f325fb9dee1fb5cdd7a54e814284896b7c3f2763faa017"}, + {file = "orjson-3.10.11.tar.gz", hash = "sha256:e35b6d730de6384d5b2dab5fd23f0d76fae8bbc8c353c2f78210aa5fa4beb3ef"}, ] [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = 
"sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -1470,22 +1456,19 @@ files = [ [[package]] name = "pydantic" -version = "2.9.2" +version = "2.10.0" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, - {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, + {file = "pydantic-2.10.0-py3-none-any.whl", hash = "sha256:5e7807ba9201bdf61b1b58aa6eb690916c40a47acfb114b1b4fef3e7fd5b30fc"}, + {file = "pydantic-2.10.0.tar.gz", hash = "sha256:0aca0f045ff6e2f097f1fe89521115335f15049eeb8a7bef3dafe4b19a74e289"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.4" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] +pydantic-core = "2.27.0" +typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] @@ -1493,100 +1476,111 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.4" +version = "2.27.0" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, - 
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, - {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, - {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, - {file = 
"pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, - {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, - {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, - {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, - {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, - {file = 
"pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, - {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, - {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, - {file = 
"pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, - {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, - {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, - {file = 
"pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, - {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, - {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, - {file = 
"pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, - {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, - {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, - {file = 
"pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, - {file 
= "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, - {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, + {file = "pydantic_core-2.27.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2ac6b919f7fed71b17fe0b4603c092a4c9b5bae414817c9c81d3c22d1e1bcc"}, + {file = "pydantic_core-2.27.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e015833384ca3e1a0565a79f5d953b0629d9138021c27ad37c92a9fa1af7623c"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db72e40628967f6dc572020d04b5f800d71264e0531c6da35097e73bdf38b003"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df45c4073bed486ea2f18757057953afed8dd77add7276ff01bccb79982cf46c"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:836a4bfe0cc6d36dc9a9cc1a7b391265bf6ce9d1eb1eac62ac5139f5d8d9a6fa"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bf1340ae507f6da6360b24179c2083857c8ca7644aab65807023cf35404ea8d"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ab325fc86fbc077284c8d7f996d904d30e97904a87d6fb303dce6b3de7ebba9"}, + {file = "pydantic_core-2.27.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1da0c98a85a6c6ed702d5556db3b09c91f9b0b78de37b7593e2de8d03238807a"}, + {file = "pydantic_core-2.27.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7b0202ebf2268954090209a84f9897345719e46a57c5f2c9b7b250ca0a9d3e63"}, + {file = "pydantic_core-2.27.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:35380671c3c921fe8adf31ad349dc6f7588b7e928dbe44e1093789734f607399"}, + {file = "pydantic_core-2.27.0-cp310-cp310-musllinux_1_1_x86_64.whl", 
hash = "sha256:6b4c19525c3538fbc0bbda6229f9682fb8199ce9ac37395880e6952798e00373"}, + {file = "pydantic_core-2.27.0-cp310-none-win32.whl", hash = "sha256:333c840a1303d1474f491e7be0b718226c730a39ead0f7dab2c7e6a2f3855555"}, + {file = "pydantic_core-2.27.0-cp310-none-win_amd64.whl", hash = "sha256:99b2863c1365f43f74199c980a3d40f18a218fbe683dd64e470199db426c4d6a"}, + {file = "pydantic_core-2.27.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4523c4009c3f39d948e01962223c9f5538602e7087a628479b723c939fab262d"}, + {file = "pydantic_core-2.27.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84af1cf7bfdcbc6fcf5a5f70cc9896205e0350306e4dd73d54b6a18894f79386"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e65466b31be1070b4a5b7dbfbd14b247884cb8e8b79c64fb0f36b472912dbaea"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a5c022bb0d453192426221605efc865373dde43b17822a264671c53b068ac20c"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bb69bf3b6500f195c3deb69c1205ba8fc3cb21d1915f1f158a10d6b1ef29b6a"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0aa4d1b2eba9a325897308b3124014a142cdccb9f3e016f31d3ebee6b5ea5e75"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e96ca781e0c01e32115912ebdf7b3fb0780ce748b80d7d28a0802fa9fbaf44e"}, + {file = "pydantic_core-2.27.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b872c86d8d71827235c7077461c502feb2db3f87d9d6d5a9daa64287d75e4fa0"}, + {file = "pydantic_core-2.27.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:82e1ad4ca170e8af4c928b67cff731b6296e6a0a0981b97b2eb7c275cc4e15bd"}, + {file = "pydantic_core-2.27.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = 
"sha256:eb40f828bc2f73f777d1eb8fee2e86cd9692a4518b63b6b5aa8af915dfd3207b"}, + {file = "pydantic_core-2.27.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9a8fbf506fde1529a1e3698198fe64bfbe2e0c09557bc6a7dcf872e7c01fec40"}, + {file = "pydantic_core-2.27.0-cp311-none-win32.whl", hash = "sha256:24f984fc7762ed5f806d9e8c4c77ea69fdb2afd987b4fd319ef06c87595a8c55"}, + {file = "pydantic_core-2.27.0-cp311-none-win_amd64.whl", hash = "sha256:68950bc08f9735306322bfc16a18391fcaac99ded2509e1cc41d03ccb6013cfe"}, + {file = "pydantic_core-2.27.0-cp311-none-win_arm64.whl", hash = "sha256:3eb8849445c26b41c5a474061032c53e14fe92a11a5db969f722a2716cd12206"}, + {file = "pydantic_core-2.27.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8117839a9bdbba86e7f9df57018fe3b96cec934c3940b591b0fd3fbfb485864a"}, + {file = "pydantic_core-2.27.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a291d0b4243a259c8ea7e2b84eb9ccb76370e569298875a7c5e3e71baf49057a"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e35afd9e10b2698e6f2f32256678cb23ca6c1568d02628033a837638b3ed12"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:58ab0d979c969983cdb97374698d847a4acffb217d543e172838864636ef10d9"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d06b667e53320332be2bf6f9461f4a9b78092a079b8ce8634c9afaa7e10cd9f"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78f841523729e43e3928a364ec46e2e3f80e6625a4f62aca5c345f3f626c6e8a"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:400bf470e4327e920883b51e255617dfe4496d4e80c3fea0b5a5d0bf2c404dd4"}, + {file = "pydantic_core-2.27.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:951e71da6c89d354572098bada5ba5b5dc3a9390c933af8a614e37755d3d1840"}, + {file = "pydantic_core-2.27.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a51ce96224eadd1845150b204389623c8e129fde5a67a84b972bd83a85c6c40"}, + {file = "pydantic_core-2.27.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:483c2213a609e7db2c592bbc015da58b6c75af7360ca3c981f178110d9787bcf"}, + {file = "pydantic_core-2.27.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:359e7951f04ad35111b5ddce184db3391442345d0ab073aa63a95eb8af25a5ef"}, + {file = "pydantic_core-2.27.0-cp312-none-win32.whl", hash = "sha256:ee7d9d5537daf6d5c74a83b38a638cc001b648096c1cae8ef695b0c919d9d379"}, + {file = "pydantic_core-2.27.0-cp312-none-win_amd64.whl", hash = "sha256:2be0ad541bb9f059954ccf8877a49ed73877f862529575ff3d54bf4223e4dd61"}, + {file = "pydantic_core-2.27.0-cp312-none-win_arm64.whl", hash = "sha256:6e19401742ed7b69e51d8e4df3c03ad5ec65a83b36244479fd70edde2828a5d9"}, + {file = "pydantic_core-2.27.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5f2b19b8d6fca432cb3acf48cf5243a7bf512988029b6e6fd27e9e8c0a204d85"}, + {file = "pydantic_core-2.27.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c86679f443e7085ea55a7376462553996c688395d18ef3f0d3dbad7838f857a2"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:510b11e9c3b1a852876d1ccd8d5903684336d635214148637ceb27366c75a467"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb704155e73b833801c247f39d562229c0303f54770ca14fb1c053acb376cf10"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ce048deb1e033e7a865ca384770bccc11d44179cf09e5193a535c4c2f497bdc"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58560828ee0951bb125c6f2862fbc37f039996d19ceb6d8ff1905abf7da0bf3d"}, + {file = 
"pydantic_core-2.27.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb4785894936d7682635726613c44578c420a096729f1978cd061a7e72d5275"}, + {file = "pydantic_core-2.27.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2883b260f7a93235488699d39cbbd94fa7b175d3a8063fbfddd3e81ad9988cb2"}, + {file = "pydantic_core-2.27.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c6fcb3fa3855d583aa57b94cf146f7781d5d5bc06cb95cb3afece33d31aac39b"}, + {file = "pydantic_core-2.27.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:e851a051f7260e6d688267eb039c81f05f23a19431bd7dfa4bf5e3cb34c108cd"}, + {file = "pydantic_core-2.27.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edb1bfd45227dec8d50bc7c7d86463cd8728bcc574f9b07de7369880de4626a3"}, + {file = "pydantic_core-2.27.0-cp313-none-win32.whl", hash = "sha256:678f66462058dd978702db17eb6a3633d634f7aa0deaea61e0a674152766d3fc"}, + {file = "pydantic_core-2.27.0-cp313-none-win_amd64.whl", hash = "sha256:d28ca7066d6cdd347a50d8b725dc10d9a1d6a1cce09836cf071ea6a2d4908be0"}, + {file = "pydantic_core-2.27.0-cp313-none-win_arm64.whl", hash = "sha256:6f4a53af9e81d757756508b57cae1cf28293f0f31b9fa2bfcb416cc7fb230f9d"}, + {file = "pydantic_core-2.27.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:e9f9feee7f334b72ceae46313333d002b56f325b5f04271b4ae2aadd9e993ae4"}, + {file = "pydantic_core-2.27.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:225bfff5d425c34e1fd562cef52d673579d59b967d9de06178850c4802af9039"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921ad596ff1a82f9c692b0758c944355abc9f0de97a4c13ca60ffc6d8dc15d4"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6354e18a9be37bfa124d6b288a87fb30c673745806c92956f1a25e3ae6e76b96"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:8ee4c2a75af9fe21269a4a0898c5425afb01af1f5d276063f57e2ae1bc64e191"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c91e3c04f5191fd3fb68764bddeaf02025492d5d9f23343b283870f6ace69708"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a6ebfac28fd51890a61df36ef202adbd77d00ee5aca4a3dadb3d9ed49cfb929"}, + {file = "pydantic_core-2.27.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36aa167f69d8807ba7e341d67ea93e50fcaaf6bc433bb04939430fa3dab06f31"}, + {file = "pydantic_core-2.27.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e8d89c276234579cd3d095d5fa2a44eb10db9a218664a17b56363cddf226ff3"}, + {file = "pydantic_core-2.27.0-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:5cc822ab90a70ea3a91e6aed3afac570b276b1278c6909b1d384f745bd09c714"}, + {file = "pydantic_core-2.27.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e15315691fe2253eb447503153acef4d7223dfe7e7702f9ed66539fcd0c43801"}, + {file = "pydantic_core-2.27.0-cp38-none-win32.whl", hash = "sha256:dfa5f5c0a4c8fced1422dc2ca7eefd872d5d13eb33cf324361dbf1dbfba0a9fe"}, + {file = "pydantic_core-2.27.0-cp38-none-win_amd64.whl", hash = "sha256:513cb14c0cc31a4dfd849a4674b20c46d87b364f997bbcb02282306f5e187abf"}, + {file = "pydantic_core-2.27.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:4148dc9184ab79e356dc00a4199dc0ee8647973332cb385fc29a7cced49b9f9c"}, + {file = "pydantic_core-2.27.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5fc72fbfebbf42c0856a824b8b0dc2b5cd2e4a896050281a21cfa6fed8879cb1"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:185ef205256cd8b38431205698531026979db89a79587725c1e55c59101d64e9"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:395e3e1148fa7809016231f8065f30bb0dc285a97b4dc4360cd86e17bab58af7"}, + {file = 
"pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33d14369739c5d07e2e7102cdb0081a1fa46ed03215e07f097b34e020b83b1ae"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7820bb0d65e3ce1e3e70b6708c2f66143f55912fa02f4b618d0f08b61575f12"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43b61989068de9ce62296cde02beffabcadb65672207fc51e7af76dca75e6636"}, + {file = "pydantic_core-2.27.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15e350efb67b855cd014c218716feea4986a149ed1f42a539edd271ee074a196"}, + {file = "pydantic_core-2.27.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:433689845288f9a1ee5714444e65957be26d30915f7745091ede4a83cfb2d7bb"}, + {file = "pydantic_core-2.27.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:3fd8bc2690e7c39eecdf9071b6a889ce7b22b72073863940edc2a0a23750ca90"}, + {file = "pydantic_core-2.27.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:884f1806609c2c66564082540cffc96868c5571c7c3cf3a783f63f2fb49bd3cd"}, + {file = "pydantic_core-2.27.0-cp39-none-win32.whl", hash = "sha256:bf37b72834e7239cf84d4a0b2c050e7f9e48bced97bad9bdf98d26b8eb72e846"}, + {file = "pydantic_core-2.27.0-cp39-none-win_amd64.whl", hash = "sha256:31a2cae5f059329f9cfe3d8d266d3da1543b60b60130d186d9b6a3c20a346361"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4fb49cfdb53af5041aba909be00cccfb2c0d0a2e09281bf542371c5fd36ad04c"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:49633583eb7dc5cba61aaf7cdb2e9e662323ad394e543ee77af265736bcd3eaa"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:153017e3d6cd3ce979de06d84343ca424bb6092727375eba1968c8b4693c6ecb"}, + {file = 
"pydantic_core-2.27.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff63a92f6e249514ef35bc795de10745be0226eaea06eb48b4bbeaa0c8850a4a"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5982048129f40b082c2654de10c0f37c67a14f5ff9d37cf35be028ae982f26df"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:91bc66f878557313c2a6bcf396e7befcffe5ab4354cfe4427318968af31143c3"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:68ef5377eb582fa4343c9d0b57a5b094046d447b4c73dd9fbd9ffb216f829e7d"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c5726eec789ee38f2c53b10b1821457b82274f81f4f746bb1e666d8741fcfadb"}, + {file = "pydantic_core-2.27.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c0c431e4be5c1a0c6654e0c31c661cd89e0ca956ef65305c3c3fd96f4e72ca39"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8e21d927469d04b39386255bf00d0feedead16f6253dcc85e9e10ddebc334084"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b51f964fcbb02949fc546022e56cdb16cda457af485e9a3e8b78ac2ecf5d77e"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a7fd4de38f7ff99a37e18fa0098c3140286451bc823d1746ba80cec5b433a1"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fda87808429c520a002a85d6e7cdadbf58231d60e96260976c5b8f9a12a8e13"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a150392102c402c538190730fda06f3bce654fc498865579a9f2c1d2b425833"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c9ed88b398ba7e3bad7bd64d66cc01dcde9cfcb7ec629a6fd78a82fa0b559d78"}, + 
{file = "pydantic_core-2.27.0-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:9fe94d9d2a2b4edd7a4b22adcd45814b1b59b03feb00e56deb2e89747aec7bfe"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d8b5ee4ae9170e2775d495b81f414cc20268041c42571530513496ba61e94ba3"}, + {file = "pydantic_core-2.27.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d29e235ce13c91902ef3efc3d883a677655b3908b1cbc73dee816e5e1f8f7739"}, + {file = "pydantic_core-2.27.0.tar.gz", hash = "sha256:f57783fbaf648205ac50ae7d646f27582fc706be3977e87c3c124e7a92407b10"}, ] [package.dependencies] @@ -1594,13 +1588,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydantic-settings" -version = "2.6.0" +version = "2.6.1" description = "Settings management using Pydantic" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_settings-2.6.0-py3-none-any.whl", hash = "sha256:4a819166f119b74d7f8c765196b165f95cc7487ce58ea27dec8a5a26be0970e0"}, - {file = "pydantic_settings-2.6.0.tar.gz", hash = "sha256:44a1804abffac9e6a30372bb45f6cafab945ef5af25e66b1c634c01dd39e0188"}, + {file = "pydantic_settings-2.6.1-py3-none-any.whl", hash = "sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87"}, + {file = "pydantic_settings-2.6.1.tar.gz", hash = "sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0"}, ] [package.dependencies] @@ -1890,105 +1884,105 @@ files = [ [[package]] name = "regex" -version = "2024.9.11" +version = "2024.11.6" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, - {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, - {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, - {file = 
"regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, - {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, - {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, - {file = 
"regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, - {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, - {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, - 
{file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, - {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, - {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, - {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, - {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, - {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, - {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, - {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = 
"regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = 
"sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = 
"regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = 
"regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + 
{file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] [[package]] @@ -2028,29 +2022,29 @@ requests = ">=2.0.1,<3.0.0" [[package]] name = "ruff" -version = "0.7.1" +version = "0.7.4" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"}, - {file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"}, - {file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = 
"sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"}, - {file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"}, - {file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"}, - {file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"}, - {file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"}, + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, ] [[package]] @@ -2065,110 +2059,110 @@ files = [ [[package]] name = "simsimd" -version = "5.9.4" +version = "5.9.11" description = "Portable mixed-precision BLAS-like vector math library for x86 and ARM" optional = false python-versions = "*" files = [ - {file = "simsimd-5.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:9aa3bb197bea5bf92ff7cbb33d4b5eea10f37d5122599142555eb556714ee542"}, - {file = "simsimd-5.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2e580b93b82fa60b6c54aabfe7cfac7a4e5bdee4556a99c77cf51f8a35849ad9"}, - {file = "simsimd-5.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:604b3d2622b37713a7adcfd7e2d0d432968ba988ec7bcd9ed3f631eacfc9be0e"}, - {file = "simsimd-5.9.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc20c854bba8c9b6f1bcae41f46c8ff81402d80bba4658226c45f254d97d393b"}, - {file = "simsimd-5.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d8c493ebbba65bd8305f2d5d7046b520bf39d020720700b23162eb8ba47b040"}, - {file = "simsimd-5.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ba24d583542bfea4d7b70eafdd528d774486df3667101f280b5d6c89e848a32"}, - {file = "simsimd-5.9.4-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:e45e095fef614a73b66c24b96f7c93de33c8d62ce756d9b2dc0c181c8ee57ca7"}, - {file = "simsimd-5.9.4-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b6a68f3c549d7c9bad134c39dd0186ee43943ace80f20e1f433b4a0e85af6158"}, - {file = "simsimd-5.9.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6559e437f4351664d46e51ca9e692850c7102e301431e78a2580ffc1f5693c22"}, - {file = "simsimd-5.9.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:070b50f3beee90ec842392b8fb888247987053235bebf046b160e2491c315699"}, - {file = "simsimd-5.9.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1d275e9ab98e652b5f9d50ef0a8191dd6a7cd15a900afc477ecd8d9267efa416"}, - {file = "simsimd-5.9.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:148a4a698d694aa5c27cdf48cc15bd7ed7c3fc62b85d1e69ccd5171955112cf5"}, - {file = "simsimd-5.9.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8b6697b66144273203696c5b9f8af475da9a1967b048de931c8238426edb6d47"}, - {file = "simsimd-5.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:9b0f1ff1cf35e84a52a506ab23d1ed3800dbfe9ceb4f3c2f9e88627abcbf01fe"}, - {file = "simsimd-5.9.4-cp310-cp310-win32.whl", hash = "sha256:5244a0409121d4caf13d1eb2dd017ae5106a92119368a7e67e5860c443faec23"}, - {file = "simsimd-5.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:e57b292964db34521d3803d6eae8f51fca5e1c76d1c16bd28aa50102c0ce93aa"}, - {file = "simsimd-5.9.4-cp310-cp310-win_arm64.whl", hash = "sha256:f335d91cae89f633b44469128dfe7f4b2c7cdbe4f46538eecb59019dd538f067"}, - {file = "simsimd-5.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d4cc3bdf78380ebd7eb4da45b83b80f5c5b5ae0538de36574f7fa36069466e5"}, - {file = "simsimd-5.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fc707a98ea34c51c7982145b830548a4af3902efec7bb0b42a4fc650f3567d46"}, - {file = "simsimd-5.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4a4a0c472fff6c631af0d8b43b1e99e5ec8c8b9e3bfb7ac7d0e4fada0efa25b"}, - {file = "simsimd-5.9.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088d06f8c4afb8cb7e7f174774253f8d970c68db92a06de6007f24ea7c98886e"}, - {file = "simsimd-5.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:689523f10440bb4f9c9c18618e5fa57516747b5c4b0786401c197604f9ae9e1e"}, - {file = "simsimd-5.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af1c689e5cc520d583648d796cbf839329b96e1d476bef2cbb9812c095fa6b1"}, - {file = "simsimd-5.9.4-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f155926c01c22684da74cf63290b72fa8b8e01d04ae07e95c080900b35c48896"}, - {file = "simsimd-5.9.4-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d292270207d564f8071b07301cce4c3b1c88c28141ac2839e30c415686ec66d6"}, - {file = "simsimd-5.9.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3f55a79f049a6d468540b313e6c5bf3e741e9b1de931aeb278372d2ff29f35ca"}, - {file = "simsimd-5.9.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = 
"sha256:6bef5a36743bf9d6f6976c0e7e889a6b44773d944d70b15223f69786ea5e2364"}, - {file = "simsimd-5.9.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:702b18ee287d734c746bf8977040cd89873e19331dff31000e928c0409f93042"}, - {file = "simsimd-5.9.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ae7139168667c3f8ca89fbba2af3df9250dc6f49ad40272c3bbb5924304b3d84"}, - {file = "simsimd-5.9.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2d63f37571baaea25fce9fa4973ff307e88969c7ef04717361d99cb844308c98"}, - {file = "simsimd-5.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4dd2a95c7ffbc04e6cd0833985b80ac8fa8b040f3b619616b542e931b83313b3"}, - {file = "simsimd-5.9.4-cp311-cp311-win32.whl", hash = "sha256:5b89e5536cc357cc4fb08839467b5b63ab3671c564e52bca463e7615852cc0ad"}, - {file = "simsimd-5.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:52cb4e52f248b84db641bd92d6a5f16fd1c085ab194e8f003a65a69d52905b5e"}, - {file = "simsimd-5.9.4-cp311-cp311-win_arm64.whl", hash = "sha256:e418f756a2eebcadf983c24dbf4f68b0c9200aafddad864022ed15c1b2feaf85"}, - {file = "simsimd-5.9.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:20041c10750feb69d6d62ed6010e6357ccec7cb8e1eb97a8d2518c23965d1a1b"}, - {file = "simsimd-5.9.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:83813b3e325fcb69c81c2b2cdb08fc0e1b78479eea3b134b07e6cf216c9b954d"}, - {file = "simsimd-5.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:704138b45d54ae95db6ec7b239b6fc410c93f28af9e0a1e31469225c26aa59a8"}, - {file = "simsimd-5.9.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7be7482084d836d90384b818a37299591569811351548348b4d60c1d90cee4a"}, - {file = "simsimd-5.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afef7ec944dfb27c997d26e6c4bf3f76b2d211f2e644765025fbaeb108cef609"}, - {file = "simsimd-5.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:cc0412d6b238bba6be3719361d04b22a3e98e9b7cd0e24d5e810f7643db79513"}, - {file = "simsimd-5.9.4-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:94422a3c723303b79a0ab75cb64ab07e3d9d9f3e376b1eda7a0ffd9de75f32a7"}, - {file = "simsimd-5.9.4-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7072ba69894e212756f1ff2304d89972c2d49d7cb524426abdac6551c5d29a07"}, - {file = "simsimd-5.9.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1820794cf0070b67579a7868b63f209d3de6ad5e23beabe84f6f1d97d2eee9ff"}, - {file = "simsimd-5.9.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:08820f1380696adb04709d6e59ab89dd1402419eb894f3d6742bf13f52c3d532"}, - {file = "simsimd-5.9.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c7894bb8d476cbe6bd216dac86be2d39b589a5f69306e4d30df1d49cc55da83e"}, - {file = "simsimd-5.9.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7ab6ba0bb0616ac8103ff51f580aeece31967ecc481000ca3e4b119ce4981bdc"}, - {file = "simsimd-5.9.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:988b116ca7664615b8af80ef8c7d50e5aee8239792af84c7a0236cbfb506b8f0"}, - {file = "simsimd-5.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:761c30ee3f250013ba6c0a98ae043c3df372593acefd9a88d205a50104613860"}, - {file = "simsimd-5.9.4-cp312-cp312-win32.whl", hash = "sha256:ebe8b0fe9fe68b95f7068cc533a00a6bace8390d6fa69757524b52ce3e94d3a8"}, - {file = "simsimd-5.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:99fa8a36faa904382e23b4f5c92410809ea73cc6977bdab6db7aa263c03af45c"}, - {file = "simsimd-5.9.4-cp312-cp312-win_arm64.whl", hash = "sha256:e740219884f3a602726ecd88e58afcdc1a5d475e9eb5c5780d90e120a91599b2"}, - {file = "simsimd-5.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ddb59bbd3060b2c266274a62279da8f49766e2e89a690d0b0f26b7dc384c171"}, - {file = "simsimd-5.9.4-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfbaeb368186628f3b028308f5e7e76a4508eb3ff6ec5dcd378f9502a0068a99"}, - {file = 
"simsimd-5.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bba3e2740fe17117ea06314c8c8b2e0ce2802d24b9c3d609feaddbd18b45ea3"}, - {file = "simsimd-5.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e19db5973c8ab88462d366eba1a3355986963e154cf404cd997c5bfd61f545b7"}, - {file = "simsimd-5.9.4-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:0206b4a5941e9cf3fe6c13cdb368810bceecfbd925699a039cfaa0186bf880f0"}, - {file = "simsimd-5.9.4-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:8dccd6843294ed5af3b3b3f1e105af79913537caf13fb66bf0286c8edc37cabc"}, - {file = "simsimd-5.9.4-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:95da968cf47c28ede55c1117182227c3feaae14e69236a93f88ac4ebf0164dbb"}, - {file = "simsimd-5.9.4-cp37-cp37m-musllinux_1_2_armv7l.whl", hash = "sha256:dff91aedba35410c956c0f58bc7fac3dbb857c2de0da7fe7462922767f1f752d"}, - {file = "simsimd-5.9.4-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:eefba8563e819f9498cdb99d0955547d34c743404b9b6e77324f5996ba6fac69"}, - {file = "simsimd-5.9.4-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:1854d7bd381cd784f0eba401d3c7a473f215907c69ceba37ff33d849c906c294"}, - {file = "simsimd-5.9.4-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:77d00679f537127f3ae81a5be8cec59e2dd646b4f038962a5e51c6b5fc8ff638"}, - {file = "simsimd-5.9.4-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:2f891052a796b329a1b9524b291e46ed27d3c15263e28af1beb71709b3dcdbde"}, - {file = "simsimd-5.9.4-cp37-cp37m-win32.whl", hash = "sha256:bd576d41b4c34f982950d12e0e44cd1c3a036e68ef64a502bd77575f1e9cb133"}, - {file = "simsimd-5.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:b049767596826931f603f7dd7078deb4df6e5f5c72e781f120b9e39d29da1d7c"}, - {file = "simsimd-5.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e52a9ffca8d369d6896b17ef146510dd245bb75183d4cd9853c5b798bcc54cd6"}, - {file = "simsimd-5.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:0d072d0447ea391862e1f4b73fa252e05a50a5b521a054f038e5176ee226d6c9"}, - {file = "simsimd-5.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a60b8b082d395a33f2598689f9becd6d76a7c99ce6265cfdac9770e78074129d"}, - {file = "simsimd-5.9.4-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9f4c33ea0d63669d4f0274490fe3d8a1bfc38e63fffbdb2cc278413ec7cb2fa8"}, - {file = "simsimd-5.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64babccde5c772cb70e1bc181350c7410a09a3b74d8d4c75a80c9b3c58f23fac"}, - {file = "simsimd-5.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bedd64a05d15f1484c51d301508036f8b273bf27853c3ab46bb48ab5c8866c0"}, - {file = "simsimd-5.9.4-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d0c544f904ee49c1b09ae57243eb5b65322cbcafd97f90d1387a701abb7992fe"}, - {file = "simsimd-5.9.4-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:af30676f5e4fbbc019d01ffe81a2f13408fb06ac00093b609dfa290cbed2c49b"}, - {file = "simsimd-5.9.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ba40d83fc876f340a8e3dea63300c312e79969b708ac0821e97cdb32ada63fb1"}, - {file = "simsimd-5.9.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:522451990de7c9882ff725ddd07e97908bcb1b9717b8cf4d91863d756538a9a0"}, - {file = "simsimd-5.9.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:336005c695576a3d075a8d2850bb7aacdaabff972580c7a6a34bd99ea7c81328"}, - {file = "simsimd-5.9.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ff2ab1cff5a830ec98400d39c6781e3897556702bf8b92ba10d58d292498103c"}, - {file = "simsimd-5.9.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:81ad083a65d6a4db620d8c0c70b40301d56338e49cc60ed76f3e13df1ce85a91"}, - {file = "simsimd-5.9.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:1c2064e42534ad094cc22c0e052d4bac364e29937d52ff6fda0560b81db7ac9d"}, - {file = "simsimd-5.9.4-cp38-cp38-win32.whl", hash = 
"sha256:06f32e34440240a491abad5859f46a316811664d117364e71fa392151a17b7b5"}, - {file = "simsimd-5.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:ecc47cb003fc59fb25806500b70555d5aafaee02f8b1f827e290b455eaed60f3"}, - {file = "simsimd-5.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:19d3fb232401509b07a544fdb3d2e58f9d2f40ece682af75295f2ef2eaa9da83"}, - {file = "simsimd-5.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:02537903ba4a06e0bc5a918aaeb01cf482de3d2e3b56be594307e7b79db22e52"}, - {file = "simsimd-5.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4d243ad10f9a3785971953c0a1580fddd507150baa65efd9ccd794a3e4922792"}, - {file = "simsimd-5.9.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d737e8bbe39ffd56ba9628b84567c63dd8b659e66c397fd80e3f63222a150d"}, - {file = "simsimd-5.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e64c5893b8ac5ca8ff3b109b7715f4e3653b5d3993281c3ceea3acb9e041011e"}, - {file = "simsimd-5.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:342affe60887f546edad4e2788d6fb9208b81f35f465f84045ab779846847731"}, - {file = "simsimd-5.9.4-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:808df09687e2cb8844b74539ca807a7aa3e1475ed030e5214bf1038bdfabdc9d"}, - {file = "simsimd-5.9.4-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8abd16873080c0fade2057cf371d02aa73e04cc1d1f5c16169dcd8a9cdbdadbc"}, - {file = "simsimd-5.9.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a6b575b7d6a74cef9e87a3f3901fd7147891bdce130d655ff498eadb9b3d49bb"}, - {file = "simsimd-5.9.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:d3fac32be9f6cb4b989a5c6ca79852f3731286a2ef2b65128350d7218cb84258"}, - {file = "simsimd-5.9.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ea76107e145e3317c7f72a906a80e4714b07ecaeb69f1b2e373e31db0c85be1e"}, - {file = "simsimd-5.9.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:0fa47f8b255f7c02ec2d22a58a1300026ae4e875791cd2696f1201ac3da00e93"}, - {file = "simsimd-5.9.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e5e1961a04a2365b4d5cfdab8463729aa8765e49f3c59cd098fdffce8402c15e"}, - {file = "simsimd-5.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89fd6e3a4453b6068e6ca43801c407fc8d5320ef6eda654ca2b470986f423855"}, - {file = "simsimd-5.9.4-cp39-cp39-win32.whl", hash = "sha256:3c60915dfbf21a7c68e409dc159a29c3a74adbdecd1961d89206fc8d86ac9000"}, - {file = "simsimd-5.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:32d484a86aef01aa17bbde89d690319fe204399eab0120b719b7aefaea1f7a45"}, - {file = "simsimd-5.9.4-cp39-cp39-win_arm64.whl", hash = "sha256:def1b28b4520dc304f29ab1dd8cd5d16dd6f7ee0aec1a15e3e9a3dca736cd7dd"}, - {file = "simsimd-5.9.4.tar.gz", hash = "sha256:f75115884854e4576130031636288294ff7045ec9812482d6a01f4f32702482b"}, + {file = "simsimd-5.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:84a534ccd04d7aa5c4539817e09f94c5c5d4bfee9d72078b89b7e18c811100ac"}, + {file = "simsimd-5.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:323468e396f94eda2494df6b85214f6e4b16812e28cab5eab5ced507aa7221de"}, + {file = "simsimd-5.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f142bbefed325ac74d7209044b2fa777a6737a907fbd39359db6c72271204cfa"}, + {file = "simsimd-5.9.11-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59a89ea757ef18014a56c16096cd80e85ec5f2d71d23068d751747e6154229d4"}, + {file = "simsimd-5.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f12d43eaab7bae5ae3e9f0fcbbbe8811eb1e28bb9b7bb68b8a78c8afdcca16f3"}, + {file = "simsimd-5.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca73c0161f47681a2b5e266dfe5fee5b75bc0c0093b978641dd672f38c9c8abf"}, + {file = "simsimd-5.9.11-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:856d59a644e3208512895aa19c52d3fa28f7359ccc6a526c99ec40a0c94d014c"}, + {file = 
"simsimd-5.9.11-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:79a2a722ccce98375a3ff7033ad21a323b03f41032b004d43817a81baf873b53"}, + {file = "simsimd-5.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:de94d6457949888f17a94ddf165f179ca4f8b83cc9eaedf9a97daeddceae829d"}, + {file = "simsimd-5.9.11-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:ecec772486848ccf52e076781591f467c339c6b19dcf66720f8d5b0ede47717d"}, + {file = "simsimd-5.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a8a211130e8499c60463b77208f51bee04ddb8d3dfece7371bb5e5b878105cdc"}, + {file = "simsimd-5.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fb7b5c3348a8ba2c4f8dbc16925e83ac4556ff7c98a086008c77d7ee192449b0"}, + {file = "simsimd-5.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:accaf43fdc9a32c5fb3cc501af91e8a6eb4443f871598f66282e83e705096627"}, + {file = "simsimd-5.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a2b2113f6cee7882f58adab0a7b8939075938addb77df28f5c4f5f88a38a4150"}, + {file = "simsimd-5.9.11-cp310-cp310-win32.whl", hash = "sha256:3b9b112bd2d3f4579b7946463ccaa245cae21ac673c19401b8655ed0984b08dc"}, + {file = "simsimd-5.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:b5030de0fa780e2f33b7b9fc176cea6455205c275bb23fba952c4f25a87fa30e"}, + {file = "simsimd-5.9.11-cp310-cp310-win_arm64.whl", hash = "sha256:a1429f7c48ac6743414e6877554ed18d62e03338162bcc506218869467790ed0"}, + {file = "simsimd-5.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:dc3161c6e2f966b06b407ca16a01157e4f62aeb54849102b2381c75afe96de63"}, + {file = "simsimd-5.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a2e1b942270c0e13a242980f6ee28791cbef68842b1365510422e3f3b1108e5"}, + {file = "simsimd-5.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a77dd15b362f71ea95ff9a4eba895d34740261ff56092303e18c7b5584b86eb4"}, + {file = "simsimd-5.9.11-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:79f0f9a2aaea47b7feda669592d40c41a3c803d9207ecb96b551e2b10badeb61"}, + {file = "simsimd-5.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3976480e40074dd8ab2e327b0620791f37f88958e23659848d65e9eaee075d69"}, + {file = "simsimd-5.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a925d2ced1d55bb994a77d563cc1cd9be6b628e555d55782ff4844fd2eff40e"}, + {file = "simsimd-5.9.11-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2f08648184772dde6286a532f4034b56be62407d2240f0fa50e9896dd269fd9f"}, + {file = "simsimd-5.9.11-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c9073d17f1ec774c3be6f3ae2bb6022cf329961ead6a53540a852f58a56d80f1"}, + {file = "simsimd-5.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f318c4aaf8d8fbe168da6bab406a598e8a8710509bcfdb758d4f27ee66991d19"}, + {file = "simsimd-5.9.11-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:332c1abf09ffbc56e8ffa0d4fe91e6505dcc6fe8a4c3212922d7e45047b55210"}, + {file = "simsimd-5.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f48db0b476dc4f3805cd83050483a3eda59b2c1e4861ca634382c0135d5848c3"}, + {file = "simsimd-5.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31f5e8b8210ac600910fa0631f094c54564e363ee72881194578ba2630721fce"}, + {file = "simsimd-5.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:32f0980848ca322fa81f8e9b73291ab780c24fdb23ad976668967830c99cfe09"}, + {file = "simsimd-5.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:866adcbfb93840e5b1915e834afda3b244fda8895aa3bdc96bbd0d51f24898f7"}, + {file = "simsimd-5.9.11-cp311-cp311-win32.whl", hash = "sha256:4b4f77da77016b8f7c2ccc8c2203d7f59112b471dc3ee047fdce72fb63f63647"}, + {file = "simsimd-5.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:706e5db8f9b5d3fea9cbf549323c57ef8529d4536cf66784ab7926fb31c3f3d3"}, + {file = "simsimd-5.9.11-cp311-cp311-win_arm64.whl", hash = 
"sha256:605af1cf0d903f31dc488a94e2e6734d3047baa41d40b362fb3285144b383f63"}, + {file = "simsimd-5.9.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b614a22531f35f9dc752c09da96cc3457f15c5d0ca3e2a12d13d54d2441a476d"}, + {file = "simsimd-5.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:95f984148040fd6ffec3bdd8ad68a1750c5bda16c226ff14ccdfc1439705a3b4"}, + {file = "simsimd-5.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46afcd0b7b59fefffdfb91b0e83e881e56b536acb072343cf73d49fbad83bb8d"}, + {file = "simsimd-5.9.11-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6286d20cf837d26a3943504eecb4db5b68046c06797ac125fbad6b5134ee3e"}, + {file = "simsimd-5.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7be158270caeb2e3daf616e052690a5bea41c81b9007d46d0746aee605001616"}, + {file = "simsimd-5.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e8d2e9f0e7d2b790ceaab1e6860de1026549a20995d93c55d81c590af4df8e82"}, + {file = "simsimd-5.9.11-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:d55e497ac4f30c31cb3046f81d18855e007d12ff1673437bac1e1a8c017f67d6"}, + {file = "simsimd-5.9.11-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:42c575afe5f9a8195ff86c4fc019972a373c1a3dd08b2263a3e4fc9f3dd9f3a0"}, + {file = "simsimd-5.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c3467413ba3343d683f1f40ed48f424ecb1f4f21dcb4d4aa0fab93790a75f375"}, + {file = "simsimd-5.9.11-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:a65aad00bbae4a7c28383a925e61f5d43edfeed8afc494e1533e5670b6d74900"}, + {file = "simsimd-5.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:344d4e276d40eeaf6c724ce3aa309204c49bbc4d64c45e961861053d46557e3f"}, + {file = "simsimd-5.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d4b7adf20cee0850937550faa1031fc6de5ab2a60d75242608e72809f308c98c"}, + {file = "simsimd-5.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:36bac4397b6d50dbc63be3fab6bb2d93256c892384b0bbb0ca7eeb9fc1386a60"}, + {file = "simsimd-5.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:32f52284c56ed1631054b679151663febeca4a0d265fb11b2d09450e51a80108"}, + {file = "simsimd-5.9.11-cp312-cp312-win32.whl", hash = "sha256:be5cf7833bebdb520fd2a81875ba8740921baba9e0d4ba123041f6b8c358c407"}, + {file = "simsimd-5.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:845172ff6358b285c77311964170e7b50b4de953f8d9f760c8c641cac964966a"}, + {file = "simsimd-5.9.11-cp312-cp312-win_arm64.whl", hash = "sha256:e36a24f31553f86550f6fb3da622c083565d4de7c400bfa80032dd556ae0c2a3"}, + {file = "simsimd-5.9.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:db2134d102f5495a7af97e5544c243b8ea9d25ab1c9f4b5ad9145b9fb07f95c9"}, + {file = "simsimd-5.9.11-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e4803b336f787c45be7da6f28a39ce923b6a868271ea4037e7bd4bc8835478"}, + {file = "simsimd-5.9.11-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8478b76b301da67cbdeb59b839f913461aa3321a1e56ea12c8cfa43277054d6"}, + {file = "simsimd-5.9.11-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e46bd11836155f262797fb6e570e958b251ee7a9c6bc708150d1f4e7cd89721"}, + {file = "simsimd-5.9.11-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:2e8dc07459cf45447c2f23ba793125410af9925fdc5ef5ef2aff6f373bb60358"}, + {file = "simsimd-5.9.11-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:f69c0bf41e8b7782f7dbf1902a35f1c48a62c9bcb957755ad70ecc6a5ffac6a3"}, + {file = "simsimd-5.9.11-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2d1e8610fe233a480cea6a5acf8b67d291cfe854cf5ead867b62e5569b57d849"}, + {file = "simsimd-5.9.11-cp37-cp37m-musllinux_1_2_armv7l.whl", hash = "sha256:574e6475b8632a1e19cff9f8bcf18ae0d7506f22b1a7640bd5ca0c4c86aa69d3"}, + {file = "simsimd-5.9.11-cp37-cp37m-musllinux_1_2_i686.whl", hash = 
"sha256:7624ebc619325aa9167476b2889fbee9edbbaf93d77608c1b79868029d82f222"}, + {file = "simsimd-5.9.11-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2c6fef446ed48d3d0d9a8f2d296f477c5f667bff38bcaa78247c4c7c5b3ce605"}, + {file = "simsimd-5.9.11-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:d120fbb350ec7287c399583dec6c0483ed897bcc099f877b708588ecdbfa75e9"}, + {file = "simsimd-5.9.11-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:36317c91ae2703ba5415c76bf7a55f6d54a79dbc722f167789f652d5a6b0322e"}, + {file = "simsimd-5.9.11-cp37-cp37m-win32.whl", hash = "sha256:73c67472f8a052522e15fe4c1fe35cd7f37686193452a2cb5d5303780f21a340"}, + {file = "simsimd-5.9.11-cp37-cp37m-win_amd64.whl", hash = "sha256:2aee5a1a1b6528088fa18eeda9357de0b21f635c341f05af4ad684dfb601d2e3"}, + {file = "simsimd-5.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d45725cc3797fd02be2bf8770dcfbd0c2eadef114c3960fb6924a765309549e0"}, + {file = "simsimd-5.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a32b58753ff7956649253da75fc68382ddea99b19bef9df56d4b1726ff0a8d94"}, + {file = "simsimd-5.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa9fc6c397ba9f31320d8b9b30068b0bb2857c09a6a01cf2e70892ec18b8012b"}, + {file = "simsimd-5.9.11-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:825ad3c69e306ab35bff789acd2db5d6294852487a7ffa6179e14ecbed4c5316"}, + {file = "simsimd-5.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2dd1a635f6e6b682ac594c02eb683f14b2052fbcc0d4ccdf4307c24b1130252a"}, + {file = "simsimd-5.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2297b60d61af009118ff769bda4d778ee5dfb7b557f177396297a5cda998ee1b"}, + {file = "simsimd-5.9.11-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:db2c103ca7a07f2021157e621db113bf5a5f5a6d32b11702aedca4b4054ae18c"}, + {file = "simsimd-5.9.11-cp38-cp38-manylinux_2_28_x86_64.whl", hash = 
"sha256:b9ec8271d3fa7f9b70ed39d3709a721fd5d94c2aa35767f06f7d908c7a55001e"}, + {file = "simsimd-5.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:230f0df6a887313dad4626e657c7e44e5bc7279eddbdaf74e2e94c5862ccdd43"}, + {file = "simsimd-5.9.11-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:aee92d573d54b9c985000cfbdcabda57cb0fe42ae678dd21f5475e1abd5b6739"}, + {file = "simsimd-5.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e42e725b040b97f318f2bba489c583ef4ff872987018461ebc2284c8b32ea96a"}, + {file = "simsimd-5.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:587638a18d9ed36df03a3c728a7fe10b7e79785fc3ce866a35fd58dce9e1f22f"}, + {file = "simsimd-5.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:70788a80e399afcc787da4ff502f62e04339805b1f2e364f31d6529ee2de03da"}, + {file = "simsimd-5.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9a4770ac29c2c02e5d02fbd7125bc7365f008d08f06933559a4c4286e20531a2"}, + {file = "simsimd-5.9.11-cp38-cp38-win32.whl", hash = "sha256:ab572de6a37435c475daa6e5deacc829cb79e028dd7269f463bf51c420e34bc0"}, + {file = "simsimd-5.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:0f976b8e3341ee3099ff247a2bed8e82beec7e74ef634b99b51945e33fab28b7"}, + {file = "simsimd-5.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8e4fef000c8bd3603f5e6884dba5aaf2909ca170be99f41516ef304fcbc9411e"}, + {file = "simsimd-5.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b2bf459923688974ab090e5b67b595aa2d9074c6e3d5cc2e70ca57e2c325b01"}, + {file = "simsimd-5.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d5b0a270566ec15d43ce43b1f2b913db3ddd16d230772c29ff2f0402ecffc3d7"}, + {file = "simsimd-5.9.11-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4fba6dfba372229683b7f78b7ff6892601c2eacd861e66e4d84bfa638bd75ed"}, + {file = "simsimd-5.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:362ba4aa418460e8f1e3a2cd13b8dd274525dffc0b26c5a4e75cacf14e8af45b"}, + {file = 
"simsimd-5.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6cb96639886e69cb1772579536d21204461b775f2383250f5ce5c1e575ad300"}, + {file = "simsimd-5.9.11-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:71ca186e4209e14b2c9ed856e7d831cacf53d6855993eef3417adb030604011b"}, + {file = "simsimd-5.9.11-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:75fca4eb8a0a8ba9058039c0ff30e77ad4d7d5d997340676a0c2c7c62e6d3bd7"}, + {file = "simsimd-5.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f84adb867f09bea8cc30ca415b2d5716783645e9fb1607ac65492ed8e8efec22"}, + {file = "simsimd-5.9.11-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:d64e680c8bd3430f0d74f8f20e0e8e98c5c7631e0d31a3f5cb9700149d647300"}, + {file = "simsimd-5.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:841447f583b11045bfd4e1427aeeee00678d12f67ddd218cb7614f96898bee5f"}, + {file = "simsimd-5.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ba227f65df3bed228843f6226d0a55682fc1c58bfb68c6dda4bad394dfbbf535"}, + {file = "simsimd-5.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b7727c80524768548122eecd5107229e7c1958e97bc666057ce8356703c805a1"}, + {file = "simsimd-5.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3244d8cbc12d2fbc0daf59df7160242871755daabd8cc01e0c905cbdfebbbb1b"}, + {file = "simsimd-5.9.11-cp39-cp39-win32.whl", hash = "sha256:2a1ffe93e781a292f1b1d34b47fbabe82414212e8cb97340428cfe4e800b72c8"}, + {file = "simsimd-5.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:86f24a980c2ac10ad8e6341281c86bc769f84c30f633ba8213d7ee046bbe9599"}, + {file = "simsimd-5.9.11-cp39-cp39-win_arm64.whl", hash = "sha256:0c63ddf5ad90ae2c80309e7763a2d4306738e19f31b614f1cc6d0f784199350a"}, + {file = "simsimd-5.9.11.tar.gz", hash = "sha256:053c034c73aa291cc9189ce90f49ca6c5d4e0b30e4d990a25965c2f516d4a21a"}, ] [[package]] @@ -2195,68 +2189,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.36" +version = "2.0.35" description = "Database 
Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, - {file = 
"SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, - {file = 
"SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, - 
{file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, - {file = 
"SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, - {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, - {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = 
"sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = 
"SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = 
"SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = 
"SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] [package.dependencies] @@ -2269,7 +2255,7 @@ aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] @@ -2366,24 +2352,24 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tomli" -version = "2.0.2" +version = "2.1.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, ] [[package]] name = "tqdm" -version = "4.66.6" +version = "4.67.0" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"}, - {file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"}, + {file = "tqdm-4.67.0-py3-none-any.whl", 
hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, + {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, ] [package.dependencies] @@ -2391,6 +2377,7 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +discord = ["requests"] notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] @@ -2440,41 +2427,41 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" -version = "5.0.3" +version = "6.0.0" description = "Filesystem events monitoring" optional = false python-versions = ">=3.9" files = [ - {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85527b882f3facda0579bce9d743ff7f10c3e1e0db0a0d0e28170a7d0e5ce2ea"}, - {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53adf73dcdc0ef04f7735066b4a57a4cd3e49ef135daae41d77395f0b5b692cb"}, - {file = "watchdog-5.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e25adddab85f674acac303cf1f5835951345a56c5f7f582987d266679979c75b"}, - {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f01f4a3565a387080dc49bdd1fefe4ecc77f894991b88ef927edbfa45eb10818"}, - {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:91b522adc25614cdeaf91f7897800b82c13b4b8ac68a42ca959f992f6990c490"}, - {file = "watchdog-5.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d52db5beb5e476e6853da2e2d24dbbbed6797b449c8bf7ea118a4ee0d2c9040e"}, - {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94d11b07c64f63f49876e0ab8042ae034674c8653bfcdaa8c4b32e71cfff87e8"}, - {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:349c9488e1d85d0a58e8cb14222d2c51cbc801ce11ac3936ab4c3af986536926"}, - {file = "watchdog-5.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:53a3f10b62c2d569e260f96e8d966463dec1a50fa4f1b22aec69e3f91025060e"}, - {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:950f531ec6e03696a2414b6308f5c6ff9dab7821a768c9d5788b1314e9a46ca7"}, - {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6deb336cba5d71476caa029ceb6e88047fc1dc74b62b7c4012639c0b563906"}, - {file = "watchdog-5.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1021223c08ba8d2d38d71ec1704496471ffd7be42cfb26b87cd5059323a389a1"}, - {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:752fb40efc7cc8d88ebc332b8f4bcbe2b5cc7e881bccfeb8e25054c00c994ee3"}, - {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2e8f3f955d68471fa37b0e3add18500790d129cc7efe89971b8a4cc6fdeb0b2"}, - {file = "watchdog-5.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8ca4d854adcf480bdfd80f46fdd6fb49f91dd020ae11c89b3a79e19454ec627"}, - {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:90a67d7857adb1d985aca232cc9905dd5bc4803ed85cfcdcfcf707e52049eda7"}, - {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:720ef9d3a4f9ca575a780af283c8fd3a0674b307651c1976714745090da5a9e8"}, - {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:223160bb359281bb8e31c8f1068bf71a6b16a8ad3d9524ca6f523ac666bb6a1e"}, - {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:560135542c91eaa74247a2e8430cf83c4342b29e8ad4f520ae14f0c8a19cfb5b"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dd021efa85970bd4824acacbb922066159d0f9e546389a4743d56919b6758b91"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:78864cc8f23dbee55be34cc1494632a7ba30263951b5b2e8fc8286b95845f82c"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_i686.whl", hash = "sha256:1e9679245e3ea6498494b3028b90c7b25dbb2abe65c7d07423ecfc2d6218ff7c"}, - 
{file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:9413384f26b5d050b6978e6fcd0c1e7f0539be7a4f1a885061473c5deaa57221"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:294b7a598974b8e2c6123d19ef15de9abcd282b0fbbdbc4d23dfa812959a9e05"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:26dd201857d702bdf9d78c273cafcab5871dd29343748524695cecffa44a8d97"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:0f9332243355643d567697c3e3fa07330a1d1abf981611654a1f2bf2175612b7"}, - {file = "watchdog-5.0.3-py3-none-win32.whl", hash = "sha256:c66f80ee5b602a9c7ab66e3c9f36026590a0902db3aea414d59a2f55188c1f49"}, - {file = "watchdog-5.0.3-py3-none-win_amd64.whl", hash = "sha256:f00b4cf737f568be9665563347a910f8bdc76f88c2970121c86243c8cfdf90e9"}, - {file = "watchdog-5.0.3-py3-none-win_ia64.whl", hash = "sha256:49f4d36cb315c25ea0d946e018c01bb028048023b9e103d3d3943f58e109dd45"}, - {file = "watchdog-5.0.3.tar.gz", hash = "sha256:108f42a7f0345042a854d4d0ad0834b741d421330d5f575b81cb27b883500176"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = 
"watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, ] [package.extras] @@ -2482,93 +2469,93 @@ watchmedo = ["PyYAML (>=3.10)"] [[package]] name = "yarl" -version = "1.17.0" +version = "1.18.0" description = "Yet another URL library" optional = false python-versions = ">=3.9" files = [ - {file = "yarl-1.17.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d8715edfe12eee6f27f32a3655f38d6c7410deb482158c0b7d4b7fad5d07628"}, - {file = "yarl-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1803bf2a7a782e02db746d8bd18f2384801bc1d108723840b25e065b116ad726"}, - {file = 
"yarl-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e66589110e20c2951221a938fa200c7aa134a8bdf4e4dc97e6b21539ff026d4"}, - {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7069d411cfccf868e812497e0ec4acb7c7bf8d684e93caa6c872f1e6f5d1664d"}, - {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cbf70ba16118db3e4b0da69dcde9d4d4095d383c32a15530564c283fa38a7c52"}, - {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0bc53cc349675b32ead83339a8de79eaf13b88f2669c09d4962322bb0f064cbc"}, - {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6aa18a402d1c80193ce97c8729871f17fd3e822037fbd7d9b719864018df746"}, - {file = "yarl-1.17.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d89c5bc701861cfab357aa0cd039bc905fe919997b8c312b4b0c358619c38d4d"}, - {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b728bdf38ca58f2da1d583e4af4ba7d4cd1a58b31a363a3137a8159395e7ecc7"}, - {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:5542e57dc15d5473da5a39fbde14684b0cc4301412ee53cbab677925e8497c11"}, - {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e564b57e5009fb150cb513804d7e9e9912fee2e48835638f4f47977f88b4a39c"}, - {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:eb3c4cff524b4c1c1dba3a6da905edb1dfd2baf6f55f18a58914bbb2d26b59e1"}, - {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:05e13f389038842da930d439fbed63bdce3f7644902714cb68cf527c971af804"}, - {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:153c38ee2b4abba136385af4467459c62d50f2a3f4bde38c7b99d43a20c143ef"}, - {file = "yarl-1.17.0-cp310-cp310-win32.whl", hash = "sha256:4065b4259d1ae6f70fd9708ffd61e1c9c27516f5b4fae273c41028afcbe3a094"}, 
- {file = "yarl-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:abf366391a02a8335c5c26163b5fe6f514cc1d79e74d8bf3ffab13572282368e"}, - {file = "yarl-1.17.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:19a4fe0279626c6295c5b0c8c2bb7228319d2e985883621a6e87b344062d8135"}, - {file = "yarl-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cadd0113f4db3c6b56868d6a19ca6286f5ccfa7bc08c27982cf92e5ed31b489a"}, - {file = "yarl-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:60d6693eef43215b1ccfb1df3f6eae8db30a9ff1e7989fb6b2a6f0b468930ee8"}, - {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb8bf3843e1fa8cf3fe77813c512818e57368afab7ebe9ef02446fe1a10b492"}, - {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d2a5b35fd1d8d90443e061d0c8669ac7600eec5c14c4a51f619e9e105b136715"}, - {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5bf17b32f392df20ab5c3a69d37b26d10efaa018b4f4e5643c7520d8eee7ac7"}, - {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f51b529b958cd06e78158ff297a8bf57b4021243c179ee03695b5dbf9cb6e1"}, - {file = "yarl-1.17.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fcaa06bf788e19f913d315d9c99a69e196a40277dc2c23741a1d08c93f4d430"}, - {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:32f3ee19ff0f18a7a522d44e869e1ebc8218ad3ae4ebb7020445f59b4bbe5897"}, - {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a4fb69a81ae2ec2b609574ae35420cf5647d227e4d0475c16aa861dd24e840b0"}, - {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7bacc8b77670322132a1b2522c50a1f62991e2f95591977455fd9a398b4e678d"}, - {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:437bf6eb47a2d20baaf7f6739895cb049e56896a5ffdea61a4b25da781966e8b"}, - {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:30534a03c87484092080e3b6e789140bd277e40f453358900ad1f0f2e61fc8ec"}, - {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b30df4ff98703649915144be6f0df3b16fd4870ac38a09c56d5d9e54ff2d5f96"}, - {file = "yarl-1.17.0-cp311-cp311-win32.whl", hash = "sha256:263b487246858e874ab53e148e2a9a0de8465341b607678106829a81d81418c6"}, - {file = "yarl-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:07055a9e8b647a362e7d4810fe99d8f98421575e7d2eede32e008c89a65a17bd"}, - {file = "yarl-1.17.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:84095ab25ba69a8fa3fb4936e14df631b8a71193fe18bd38be7ecbe34d0f5512"}, - {file = "yarl-1.17.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02608fb3f6df87039212fc746017455ccc2a5fc96555ee247c45d1e9f21f1d7b"}, - {file = "yarl-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13468d291fe8c12162b7cf2cdb406fe85881c53c9e03053ecb8c5d3523822cd9"}, - {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8da3f8f368fb7e2f052fded06d5672260c50b5472c956a5f1bd7bf474ae504ab"}, - {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec0507ab6523980bed050137007c76883d941b519aca0e26d4c1ec1f297dd646"}, - {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08fc76df7fd8360e9ff30e6ccc3ee85b8dbd6ed5d3a295e6ec62bcae7601b932"}, - {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d522f390686acb6bab2b917dd9ca06740c5080cd2eaa5aef8827b97e967319d"}, - {file = "yarl-1.17.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:147c527a80bb45b3dcd6e63401af8ac574125d8d120e6afe9901049286ff64ef"}, - {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash 
= "sha256:24cf43bcd17a0a1f72284e47774f9c60e0bf0d2484d5851f4ddf24ded49f33c6"}, - {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c28a44b9e0fba49c3857360e7ad1473fc18bc7f6659ca08ed4f4f2b9a52c75fa"}, - {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:350cacb2d589bc07d230eb995d88fcc646caad50a71ed2d86df533a465a4e6e1"}, - {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:fd1ab1373274dea1c6448aee420d7b38af163b5c4732057cd7ee9f5454efc8b1"}, - {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4934e0f96dadc567edc76d9c08181633c89c908ab5a3b8f698560124167d9488"}, - {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8d0a278170d75c88e435a1ce76557af6758bfebc338435b2eba959df2552163e"}, - {file = "yarl-1.17.0-cp312-cp312-win32.whl", hash = "sha256:61584f33196575a08785bb56db6b453682c88f009cd9c6f338a10f6737ce419f"}, - {file = "yarl-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:9987a439ad33a7712bd5bbd073f09ad10d38640425fa498ecc99d8aa064f8fc4"}, - {file = "yarl-1.17.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8deda7b8eb15a52db94c2014acdc7bdd14cb59ec4b82ac65d2ad16dc234a109e"}, - {file = "yarl-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:56294218b348dcbd3d7fce0ffd79dd0b6c356cb2a813a1181af730b7c40de9e7"}, - {file = "yarl-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1fab91292f51c884b290ebec0b309a64a5318860ccda0c4940e740425a67b6b7"}, - {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cf93fa61ff4d9c7d40482ce1a2c9916ca435e34a1b8451e17f295781ccc034f"}, - {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:261be774a0d71908c8830c33bacc89eef15c198433a8cc73767c10eeeb35a7d0"}, - {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:deec9693b67f6af856a733b8a3e465553ef09e5e8ead792f52c25b699b8f9e6e"}, - {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c804b07622ba50a765ca7fb8145512836ab65956de01307541def869e4a456c9"}, - {file = "yarl-1.17.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d013a7c9574e98c14831a8f22d27277688ec3b2741d0188ac01a910b009987a"}, - {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e2cfcba719bd494c7413dcf0caafb51772dec168c7c946e094f710d6aa70494e"}, - {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c068aba9fc5b94dfae8ea1cedcbf3041cd4c64644021362ffb750f79837e881f"}, - {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3616df510ffac0df3c9fa851a40b76087c6c89cbcea2de33a835fc80f9faac24"}, - {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:755d6176b442fba9928a4df787591a6a3d62d4969f05c406cad83d296c5d4e05"}, - {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c18f6e708d1cf9ff5b1af026e697ac73bea9cb70ee26a2b045b112548579bed2"}, - {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5b937c216b6dee8b858c6afea958de03c5ff28406257d22b55c24962a2baf6fd"}, - {file = "yarl-1.17.0-cp313-cp313-win32.whl", hash = "sha256:d0131b14cb545c1a7bd98f4565a3e9bdf25a1bd65c83fc156ee5d8a8499ec4a3"}, - {file = "yarl-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:01c96efa4313c01329e88b7e9e9e1b2fc671580270ddefdd41129fa8d0db7696"}, - {file = "yarl-1.17.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0d44f67e193f0a7acdf552ecb4d1956a3a276c68e7952471add9f93093d1c30d"}, - {file = "yarl-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:16ea0aa5f890cdcb7ae700dffa0397ed6c280840f637cd07bffcbe4b8d68b985"}, - {file = "yarl-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cf5469dc7dcfa65edf5cc3a6add9f84c5529c6b556729b098e81a09a92e60e51"}, 
- {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e662bf2f6e90b73cf2095f844e2bc1fda39826472a2aa1959258c3f2a8500a2f"}, - {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8260e88f1446904ba20b558fa8ce5d0ab9102747238e82343e46d056d7304d7e"}, - {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dc16477a4a2c71e64c5d3d15d7ae3d3a6bb1e8b955288a9f73c60d2a391282f"}, - {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46027e326cecd55e5950184ec9d86c803f4f6fe4ba6af9944a0e537d643cdbe0"}, - {file = "yarl-1.17.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc95e46c92a2b6f22e70afe07e34dbc03a4acd07d820204a6938798b16f4014f"}, - {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:16ca76c7ac9515320cd09d6cc083d8d13d1803f6ebe212b06ea2505fd66ecff8"}, - {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:eb1a5b97388f2613f9305d78a3473cdf8d80c7034e554d8199d96dcf80c62ac4"}, - {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:41fd5498975418cdc34944060b8fbeec0d48b2741068077222564bea68daf5a6"}, - {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:146ca582ed04a5664ad04b0e0603934281eaab5c0115a5a46cce0b3c061a56a1"}, - {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:6abb8c06107dbec97481b2392dafc41aac091a5d162edf6ed7d624fe7da0587a"}, - {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d14be4613dd4f96c25feb4bd8c0d8ce0f529ab0ae555a17df5789e69d8ec0c5"}, - {file = "yarl-1.17.0-cp39-cp39-win32.whl", hash = "sha256:174d6a6cad1068f7850702aad0c7b1bca03bcac199ca6026f84531335dfc2646"}, - {file = "yarl-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:6af417ca2c7349b101d3fd557ad96b4cd439fdb6ab0d288e3f64a068eea394d0"}, - {file = 
"yarl-1.17.0-py3-none-any.whl", hash = "sha256:62dd42bb0e49423f4dd58836a04fcf09c80237836796025211bbe913f1524993"}, - {file = "yarl-1.17.0.tar.gz", hash = "sha256:d3f13583f378930377e02002b4085a3d025b00402d5a80911726d43a67911cd9"}, + {file = "yarl-1.18.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:074fee89caab89a97e18ef5f29060ef61ba3cae6cd77673acc54bfdd3214b7b7"}, + {file = "yarl-1.18.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b026cf2c32daf48d90c0c4e406815c3f8f4cfe0c6dfccb094a9add1ff6a0e41a"}, + {file = "yarl-1.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ae38bd86eae3ba3d2ce5636cc9e23c80c9db2e9cb557e40b98153ed102b5a736"}, + {file = "yarl-1.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:685cc37f3f307c6a8e879986c6d85328f4c637f002e219f50e2ef66f7e062c1d"}, + {file = "yarl-1.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8254dbfce84ee5d1e81051ee7a0f1536c108ba294c0fdb5933476398df0654f3"}, + {file = "yarl-1.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20de4a8b04de70c49698dc2390b7fd2d18d424d3b876371f9b775e2b462d4b41"}, + {file = "yarl-1.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0a2074a37285570d54b55820687de3d2f2b9ecf1b714e482e48c9e7c0402038"}, + {file = "yarl-1.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f576ed278860df2721a5d57da3381040176ef1d07def9688a385c8330db61a1"}, + {file = "yarl-1.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3a3709450a574d61be6ac53d582496014342ea34876af8dc17cc16da32826c9a"}, + {file = "yarl-1.18.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:bd80ed29761490c622edde5dd70537ca8c992c2952eb62ed46984f8eff66d6e8"}, + {file = "yarl-1.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:32141e13a1d5a48525e519c9197d3f4d9744d818d5c7d6547524cc9eccc8971e"}, + {file = 
"yarl-1.18.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8b8d3e4e014fb4274f1c5bf61511d2199e263909fb0b8bda2a7428b0894e8dc6"}, + {file = "yarl-1.18.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:701bb4a8f4de191c8c0cc9a1e6d5142f4df880e9d1210e333b829ca9425570ed"}, + {file = "yarl-1.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a45d94075ac0647621eaaf693c8751813a3eccac455d423f473ffed38c8ac5c9"}, + {file = "yarl-1.18.0-cp310-cp310-win32.whl", hash = "sha256:34176bfb082add67cb2a20abd85854165540891147f88b687a5ed0dc225750a0"}, + {file = "yarl-1.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:73553bbeea7d6ec88c08ad8027f4e992798f0abc459361bf06641c71972794dc"}, + {file = "yarl-1.18.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b8e8c516dc4e1a51d86ac975b0350735007e554c962281c432eaa5822aa9765c"}, + {file = "yarl-1.18.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e6b4466714a73f5251d84b471475850954f1fa6acce4d3f404da1d55d644c34"}, + {file = "yarl-1.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c893f8c1a6d48b25961e00922724732d00b39de8bb0b451307482dc87bddcd74"}, + {file = "yarl-1.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13aaf2bdbc8c86ddce48626b15f4987f22e80d898818d735b20bd58f17292ee8"}, + {file = "yarl-1.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd21c0128e301851de51bc607b0a6da50e82dc34e9601f4b508d08cc89ee7929"}, + {file = "yarl-1.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:205de377bd23365cd85562c9c6c33844050a93661640fda38e0567d2826b50df"}, + {file = "yarl-1.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed69af4fe2a0949b1ea1d012bf065c77b4c7822bad4737f17807af2adb15a73c"}, + {file = "yarl-1.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e1c18890091aa3cc8a77967943476b729dc2016f4cfe11e45d89b12519d4a93"}, + {file = 
"yarl-1.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:91b8fb9427e33f83ca2ba9501221ffaac1ecf0407f758c4d2f283c523da185ee"}, + {file = "yarl-1.18.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:536a7a8a53b75b2e98ff96edb2dfb91a26b81c4fed82782035767db5a465be46"}, + {file = "yarl-1.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a64619a9c47c25582190af38e9eb382279ad42e1f06034f14d794670796016c0"}, + {file = "yarl-1.18.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c73a6bbc97ba1b5a0c3c992ae93d721c395bdbb120492759b94cc1ac71bc6350"}, + {file = "yarl-1.18.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a173401d7821a2a81c7b47d4e7d5c4021375a1441af0c58611c1957445055056"}, + {file = "yarl-1.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7520e799b1f84e095cce919bd6c23c9d49472deeef25fe1ef960b04cca51c3fc"}, + {file = "yarl-1.18.0-cp311-cp311-win32.whl", hash = "sha256:c4cb992d8090d5ae5f7afa6754d7211c578be0c45f54d3d94f7781c495d56716"}, + {file = "yarl-1.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:52c136f348605974c9b1c878addd6b7a60e3bf2245833e370862009b86fa4689"}, + {file = "yarl-1.18.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1ece25e2251c28bab737bdf0519c88189b3dd9492dc086a1d77336d940c28ced"}, + {file = "yarl-1.18.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:454902dc1830d935c90b5b53c863ba2a98dcde0fbaa31ca2ed1ad33b2a7171c6"}, + {file = "yarl-1.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:01be8688fc211dc237e628fcc209dda412d35de7642453059a0553747018d075"}, + {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d26f1fa9fa2167bb238f6f4b20218eb4e88dd3ef21bb8f97439fa6b5313e30d"}, + {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b234a4a9248a9f000b7a5dfe84b8cb6210ee5120ae70eb72a4dcbdb4c528f72f"}, + {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fe94d1de77c4cd8caff1bd5480e22342dbd54c93929f5943495d9c1e8abe9f42"}, + {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b4c90c5363c6b0a54188122b61edb919c2cd1119684999d08cd5e538813a28e"}, + {file = "yarl-1.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a98ecadc5a241c9ba06de08127ee4796e1009555efd791bac514207862b43d"}, + {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9106025c7f261f9f5144f9aa7681d43867eed06349a7cfb297a1bc804de2f0d1"}, + {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:f275ede6199d0f1ed4ea5d55a7b7573ccd40d97aee7808559e1298fe6efc8dbd"}, + {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f7edeb1dcc7f50a2c8e08b9dc13a413903b7817e72273f00878cb70e766bdb3b"}, + {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c083f6dd6951b86e484ebfc9c3524b49bcaa9c420cb4b2a78ef9f7a512bfcc85"}, + {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:80741ec5b471fbdfb997821b2842c59660a1c930ceb42f8a84ba8ca0f25a66aa"}, + {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b1a3297b9cad594e1ff0c040d2881d7d3a74124a3c73e00c3c71526a1234a9f7"}, + {file = "yarl-1.18.0-cp312-cp312-win32.whl", hash = "sha256:cd6ab7d6776c186f544f893b45ee0c883542b35e8a493db74665d2e594d3ca75"}, + {file = "yarl-1.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:039c299a0864d1f43c3e31570045635034ea7021db41bf4842693a72aca8df3a"}, + {file = "yarl-1.18.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6fb64dd45453225f57d82c4764818d7a205ee31ce193e9f0086e493916bd4f72"}, + {file = "yarl-1.18.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3adaaf9c6b1b4fc258584f4443f24d775a2086aee82d1387e48a8b4f3d6aecf6"}, + {file = "yarl-1.18.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:da206d1ec78438a563c5429ab808a2b23ad7bc025c8adbf08540dde202be37d5"}, + {file = "yarl-1.18.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:576d258b21c1db4c6449b1c572c75d03f16a482eb380be8003682bdbe7db2f28"}, + {file = "yarl-1.18.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c60e547c0a375c4bfcdd60eef82e7e0e8698bf84c239d715f5c1278a73050393"}, + {file = "yarl-1.18.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3818eabaefb90adeb5e0f62f047310079d426387991106d4fbf3519eec7d90a"}, + {file = "yarl-1.18.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5f72421246c21af6a92fbc8c13b6d4c5427dfd949049b937c3b731f2f9076bd"}, + {file = "yarl-1.18.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7fa7d37f2ada0f42e0723632993ed422f2a679af0e200874d9d861720a54f53e"}, + {file = "yarl-1.18.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:42ba84e2ac26a3f252715f8ec17e6fdc0cbf95b9617c5367579fafcd7fba50eb"}, + {file = "yarl-1.18.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6a49ad0102c0f0ba839628d0bf45973c86ce7b590cdedf7540d5b1833ddc6f00"}, + {file = "yarl-1.18.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:96404e8d5e1bbe36bdaa84ef89dc36f0e75939e060ca5cd45451aba01db02902"}, + {file = "yarl-1.18.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a0509475d714df8f6d498935b3f307cd122c4ca76f7d426c7e1bb791bcd87eda"}, + {file = "yarl-1.18.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1ff116f0285b5c8b3b9a2680aeca29a858b3b9e0402fc79fd850b32c2bcb9f8b"}, + {file = "yarl-1.18.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2580c1d7e66e6d29d6e11855e3b1c6381971e0edd9a5066e6c14d79bc8967af"}, + {file = "yarl-1.18.0-cp313-cp313-win32.whl", hash = "sha256:14408cc4d34e202caba7b5ac9cc84700e3421a9e2d1b157d744d101b061a4a88"}, + {file = "yarl-1.18.0-cp313-cp313-win_amd64.whl", hash 
= "sha256:1db1537e9cb846eb0ff206eac667f627794be8b71368c1ab3207ec7b6f8c5afc"}, + {file = "yarl-1.18.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fa2c9cb607e0f660d48c54a63de7a9b36fef62f6b8bd50ff592ce1137e73ac7d"}, + {file = "yarl-1.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c0f4808644baf0a434a3442df5e0bedf8d05208f0719cedcd499e168b23bfdc4"}, + {file = "yarl-1.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7db9584235895a1dffca17e1c634b13870852094f6389b68dcc6338086aa7b08"}, + {file = "yarl-1.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:309f8d27d6f93ceeeb80aa6980e883aa57895270f7f41842b92247e65d7aeddf"}, + {file = "yarl-1.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:609ffd44fed2ed88d9b4ef62ee860cf86446cf066333ad4ce4123505b819e581"}, + {file = "yarl-1.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f172b8b2c72a13a06ea49225a9c47079549036ad1b34afa12d5491b881f5b993"}, + {file = "yarl-1.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89ae7de94631b60d468412c18290d358a9d805182373d804ec839978b120422"}, + {file = "yarl-1.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:466d31fd043ef9af822ee3f1df8fdff4e8c199a7f4012c2642006af240eade17"}, + {file = "yarl-1.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7609b8462351c4836b3edce4201acb6dd46187b207c589b30a87ffd1813b48dc"}, + {file = "yarl-1.18.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:d9d4f5e471e8dc49b593a80766c2328257e405f943c56a3dc985c125732bc4cf"}, + {file = "yarl-1.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:67b336c15e564d76869c9a21316f90edf546809a5796a083b8f57c845056bc01"}, + {file = "yarl-1.18.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b212452b80cae26cb767aa045b051740e464c5129b7bd739c58fbb7deb339e7b"}, + {file = "yarl-1.18.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:38b39b7b3e692b6c92b986b00137a3891eddb66311b229d1940dcbd4f025083c"}, + {file = "yarl-1.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a7ee6884a8848792d58b854946b685521f41d8871afa65e0d4a774954e9c9e89"}, + {file = "yarl-1.18.0-cp39-cp39-win32.whl", hash = "sha256:b4095c5019bb889aa866bf12ed4c85c0daea5aafcb7c20d1519f02a1e738f07f"}, + {file = "yarl-1.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:2d90f2e4d16a5b0915ee065218b435d2ef619dd228973b1b47d262a6f7cd8fa5"}, + {file = "yarl-1.18.0-py3-none-any.whl", hash = "sha256:dbf53db46f7cf176ee01d8d98c39381440776fcda13779d269a8ba664f69bec0"}, + {file = "yarl-1.18.0.tar.gz", hash = "sha256:20d95535e7d833889982bfe7cc321b7f63bf8879788fee982c76ae2b24cfb715"}, ] [package.dependencies] @@ -2579,4 +2566,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "32f31b5c8dca21261535dc2f56d19047724b2e31697fa21d06cdf20331b5d254" +content-hash = "a3ad4a424c3b998db93055c0f9684f12a978b30330caea956fa4312024d9c13c" diff --git a/libs/mongodb/pyproject.toml b/libs/mongodb/pyproject.toml index 58d157b..5f8f5ef 100644 --- a/libs/mongodb/pyproject.toml +++ b/libs/mongodb/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["poetry-core>=1.0.0"] +requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.poetry] @@ -11,13 +11,6 @@ readme = "README.md" repository = "https://github.com/langchain-ai/langchain-mongodb" license = "MIT" -[tool.mypy] -disallow_untyped_defs = "True" - -[tool.poetry.urls] -"Source Code" = "https://github.com/langchain-ai/langchain-mongodb/tree/main/libs/mongodb" -"Release Notes" = "https://github.com/langchain-ai/langchain-mongodb/releases" - [tool.poetry.dependencies] python = "^3.9" pymongo = "^4.6.1" @@ -34,57 +27,44 @@ python = "<3.12" version = "^1.26.0" python = ">=3.12" -[tool.ruff.lint] -select = ["E", "F", "I"] - -[tool.coverage.run] -omit = ["tests/*"] - -[tool.pytest.ini_options] -addopts = "--snapshot-warn-unused --strict-markers 
--strict-config --durations=5" -markers = [ - "requires: mark tests as requiring a specific library", - "compile: mark placeholder test used to compile integration tests without running them", -] -asyncio_mode = "auto" - -[tool.poetry.group.test] -optional = true - -[tool.poetry.group.codespell] -optional = true - -[tool.poetry.group.test_integration] -optional = true - -[tool.poetry.group.lint] +[tool.poetry.group.dev] optional = true -[tool.poetry.group.test.dependencies] +[tool.poetry.group.dev.dependencies] +langchain = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/langchain"} +langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"} +langchain-text-splitters = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/text-splitters"} +# test pytest = "^7.3.0" freezegun = "^1.2.2" pytest-mock = "^3.10.0" syrupy = "^4.0.2" pytest-watcher = "^0.3.4" pytest-asyncio = "^0.21.1" -langchain = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/langchain"} -langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"} -langchain-text-splitters = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/text-splitters"} mongomock = "^4.2.0.post1" - - -[tool.poetry.group.codespell.dependencies] +# lint +ruff = ">=0.5" +mypy = "^1.10" +simsimd = "^5.0.0" codespell = "^2.2.0" - -[tool.poetry.group.test_integration.dependencies] +# integration test langchain-openai = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/partners/openai"} langchain-community = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/community"} pypdf = "^5.0.1" -[tool.poetry.group.lint.dependencies] -ruff = ">=0.5" +[tool.pytest.ini_options] +addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5" +markers = [ + "requires: mark tests as requiring a 
specific library", + "compile: mark placeholder test used to compile integration tests without running them", +] +asyncio_mode = "auto" -[tool.poetry.group.typing.dependencies] -mypy = "^1.10" -simsimd = "^5.0.0" -langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"} +[tool.mypy] +disallow_untyped_defs = "True" + +[tool.ruff.lint] +select = ["E", "F", "I"] + +[tool.coverage.run] +omit = ["tests/*"]