diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml new file mode 100644 index 00000000..52516432 --- /dev/null +++ b/.github/workflows/deploy_docs.yml @@ -0,0 +1,27 @@ +name: Deploy Docs + +on: + push: + branches: + - main + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + - run: pipx install poetry + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + cache: "poetry" + - run: poetry install + - run: poetry run mkdocs build + - name: Deploy to GitHub Pages + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./site diff --git a/docs/CNAME b/docs/CNAME new file mode 100644 index 00000000..22179c55 --- /dev/null +++ b/docs/CNAME @@ -0,0 +1 @@ +magentic.dev diff --git a/docs/asyncio.md b/docs/asyncio.md new file mode 100644 index 00000000..84f5d351 --- /dev/null +++ b/docs/asyncio.md @@ -0,0 +1,47 @@ +# Asyncio + +Asynchronous functions / coroutines can be used to concurrently query the LLM. This can greatly increase the overall speed of generation, and also allow other asynchronous code to run while waiting on LLM output. In the below example, the LLM generates a description for each US president while it is waiting on the next one in the list. Measuring the characters generated per second shows that this example achieves a 7x speedup over serial processing. + +```python +import asyncio +from time import time +from typing import AsyncIterable + +from magentic import prompt + + +@prompt("List ten presidents of the United States") +async def iter_presidents() -> AsyncIterable[str]: + ... + + +@prompt("Tell me more about {topic}") +async def tell_me_more_about(topic: str) -> str: + ... + + +# For each president listed, generate a description concurrently +start_time = time() +tasks = [] +async for president in await iter_presidents(): + # Use asyncio.create_task to schedule the coroutine for execution before awaiting it + # This way descriptions will start being generated while the list of presidents is still being generated + task = asyncio.create_task(tell_me_more_about(president)) + tasks.append(task) + +descriptions = await asyncio.gather(*tasks) + +# Measure the characters per second +total_chars = sum(len(desc) for desc in descriptions) +time_elapsed = time() - start_time +print(total_chars, time_elapsed, total_chars / time_elapsed) +# 24575 28.70 856.07 + + +# Measure the characters per second to describe a single president +start_time = time() +out = await tell_me_more_about("George Washington") +time_elapsed = time() - start_time +print(len(out), time_elapsed, len(out) / time_elapsed) +# 2206 18.72 117.78 +``` diff --git a/docs/chat-prompting.md b/docs/chat-prompting.md new file mode 100644 index 00000000..080c5e08 --- /dev/null +++ b/docs/chat-prompting.md @@ -0,0 +1,34 @@ +# Chat Prompting + +The `@chatprompt` decorator works just like `@prompt` but allows you to pass chat messages as a template rather than a single text prompt. This can be used to provide a system message or for few-shot prompting where you provide example responses to guide the model's output. Format fields denoted by curly braces `{example}` will be filled in all messages - use the `escape_braces` function to prevent a string being used as a template. 
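+
+For example, `escape_braces` doubles any curly braces in a string so they are treated as literal text rather than as format fields. A minimal sketch of its behavior (assuming it escapes braces by doubling them, as `str.format` expects; the example string is illustrative):
+
+```python
+from magentic.chatprompt import escape_braces
+
+# Escape the braces so "{unknown}" stays literal text in the message,
+# instead of being treated as a format field to fill in
+user_input = "What does {unknown} mean?"
+escape_braces(user_input)
+# 'What does {{unknown}} mean?'
+```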
+
+```python
+from magentic import chatprompt, AssistantMessage, SystemMessage, UserMessage
+from magentic.chatprompt import escape_braces
+
+from pydantic import BaseModel
+
+
+class Quote(BaseModel):
+    quote: str
+    character: str
+
+
+@chatprompt(
+    SystemMessage("You are a movie buff."),
+    UserMessage("What is your favorite quote from Harry Potter?"),
+    AssistantMessage(
+        Quote(
+            quote="It does not do to dwell on dreams and forget to live.",
+            character="Albus Dumbledore",
+        )
+    ),
+    UserMessage("What is your favorite quote from {movie}?"),
+)
+def get_movie_quote(movie: str) -> Quote:
+    ...
+
+
+get_movie_quote("Iron Man")
+# Quote(quote='I am Iron Man.', character='Tony Stark')
+```
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 00000000..225e1f71
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,59 @@
+# LLM Configuration
+
+Currently two backends are available:
+
+- `openai` : the default backend that uses the `openai` Python package. Supports all features.
+- `litellm` : uses the `litellm` Python package to enable querying LLMs from [many different providers](https://docs.litellm.ai/docs/providers). Install this with `pip install magentic[litellm]`. Note: some models may not support all features of `magentic`, e.g. function calling/structured output and streaming.
+
+The backend and LLM used by `magentic` can be configured in several ways. The order of precedence of configuration is:
+
+1. Arguments explicitly passed when initializing an instance in Python
+1. Values set using a context manager in Python
+1. Environment variables
+1. Default values from [src/magentic/settings.py](https://github.com/jackmpcollins/magentic/blob/main/src/magentic/settings.py)
+
+```python
+from magentic import OpenaiChatModel, prompt
+from magentic.chat_model.litellm_chat_model import LitellmChatModel
+
+
+@prompt("Say hello")
+def say_hello() -> str:
+    ...
+
+
+@prompt(
+    "Say hello",
+    model=LitellmChatModel("ollama/llama2"),
+)
+def say_hello_litellm() -> str:
+    ...
+
+
+say_hello()  # Uses env vars or default settings
+
+with OpenaiChatModel("gpt-3.5-turbo", temperature=1):
+    say_hello()  # Uses openai with gpt-3.5-turbo and temperature=1 due to context manager
+    say_hello_litellm()  # Uses litellm with ollama/llama2 because explicitly configured
+```
+
+The following environment variables can be set.
+
+| Environment Variable         | Description                             | Example                |
+| ---------------------------- | --------------------------------------- | ---------------------- |
+| MAGENTIC_BACKEND             | The package to use as the LLM backend   | openai                 |
+| MAGENTIC_LITELLM_MODEL       | LiteLLM model                           | claude-2               |
+| MAGENTIC_LITELLM_API_BASE    | The base URL to query                   | http://localhost:11434 |
+| MAGENTIC_LITELLM_MAX_TOKENS  | LiteLLM max number of generated tokens  | 1024                   |
+| MAGENTIC_LITELLM_TEMPERATURE | LiteLLM temperature                     | 0.5                    |
+| MAGENTIC_OPENAI_MODEL        | OpenAI model                            | gpt-4                  |
+| MAGENTIC_OPENAI_API_KEY      | OpenAI API key to be used by magentic   | sk-...                 |
| +| MAGENTIC_OPENAI_API_TYPE | Allowed options: "openai", "azure" | azure | +| MAGENTIC_OPENAI_BASE_URL | Base URL for an OpenAI-compatible API | http://localhost:8080 | +| MAGENTIC_OPENAI_MAX_TOKENS | OpenAI max number of generated tokens | 1024 | +| MAGENTIC_OPENAI_SEED | Seed for deterministic sampling | 42 | +| MAGENTIC_OPENAI_TEMPERATURE | OpenAI temperature | 0.5 | + +When using the `openai` backend, setting the `MAGENTIC_OPENAI_BASE_URL` environment variable or using `OpenaiChatModel(..., base_url="http://localhost:8080")` in code allows you to use `magentic` with any OpenAI-compatible API e.g. [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line&pivots=programming-language-python#create-a-new-python-application), [LiteLLM OpenAI Proxy Server](https://docs.litellm.ai/docs/proxy_server), [LocalAI](https://localai.io/howtos/easy-request-openai/). Note that if the API does not support function calling then you will not be able to create prompt-functions that return Python objects, but other features of `magentic` will still work. + +To use Azure with the openai backend you will need to set the `MAGENTIC_OPENAI_API_TYPE` environment variable to "azure" or use `OpenaiChatModel(..., api_type="azure")`, and also set the environment variables needed by the openai package to access Azure. See https://github.com/openai/openai-python#microsoft-azure-openai diff --git a/docs/css/jupyter-notebook.css b/docs/css/jupyter-notebook.css new file mode 100644 index 00000000..75a1dfb7 --- /dev/null +++ b/docs/css/jupyter-notebook.css @@ -0,0 +1,82 @@ +/* Jupyter Notebook Custom CSS */ + +/* Hide "In" labels */ +.jp-InputPrompt, +.jp-InputArea-prompt { + display: none !important; +} + +/* Hide "Out" labels */ +.jp-OutputPrompt, +.jp-OutputArea-prompt { + display: none !important; +} + +/* Add background to cell outputs */ +.jp-OutputArea.jp-Cell-outputArea { + background: var(--md-code-bg-color) !important; +} + +/* Make dataframes match code background color */ +.dataframe { + background: var(--md-code-bg-color); +} +.dataframe tbody tr:nth-child(odd) { + background-color: var(--md-code-bg-color) !important; +} + +/* Make Jupyter code highlighting the same as regular code highlighting */ + +.highlight-ipynb, +.highlight-ipynb :is(.o, .ow) { + background: var(--md-code-bg-color) !important; + color: var(--md-code-hl-operator-color) !important; +} + +.highlight-ipynb .p { + color: var(--md-code-hl-punctuation-color) !important; +} + +.highlight-ipynb :is(.cpf, .l, .s, .sb, .sc, .s2, .si, .s1, .ss) { + color: var(--md-code-hl-string-color) !important; +} + +.highlight-ipynb :is(.cp, .se, .sh, .sr, .sx) { + color: var(--md-code-hl-special-color) !important; +} + +.highlight-ipynb :is(.m, .mb, .mf, .mh, .mi, .il, .mo) { + color: var(--md-code-hl-number-color) !important; +} + +.highlight-ipynb :is(.k, .kd, .kn, .kp, .kr, .kt) { + color: var(--md-code-hl-keyword-color) !important; +} + +.highlight-ipynb :is(.kc, .n) { + color: var(--md-code-hl-name-color) !important; +} + +.highlight-ipynb :is(.no, .nb, .bp) { + color: var(--md-code-hl-constant-color) !important; +} + +.highlight-ipynb :is(.nc, .ne, .nf, .nn) { + color: var(--md-code-hl-function-color) !important; +} + +.highlight-ipynb :is(.nd, .ni, .nl, .nt) { + color: var(--md-code-hl-keyword-color) !important; +} + +.highlight-ipynb :is(.c, .cm, .c1, .ch, .cs, .sd) { + color: var(--md-code-hl-comment-color) !important; +} + +.highlight-ipynb :is(.na, .nv, .vc, .vg, .vi) { + color: 
var(--md-code-hl-variable-color) !important; +} + +.highlight-ipynb :is(.ge, .gr, .gh, .go, .gp, .gs, .gu, .gt) { + color: var(--md-code-hl-generic-color) !important; +} diff --git a/examples/chain_of_verification/chain_of_verification.ipynb b/docs/examples/chain_of_verification.ipynb similarity index 100% rename from examples/chain_of_verification/chain_of_verification.ipynb rename to docs/examples/chain_of_verification.ipynb diff --git a/examples/custom_function_schemas/register_dataframe_function_schema.ipynb b/docs/examples/registering_custom_type.ipynb similarity index 100% rename from examples/custom_function_schemas/register_dataframe_function_schema.ipynb rename to docs/examples/registering_custom_type.ipynb diff --git a/examples/retrieval_augmented_generation/wikipedia.ipynb b/docs/examples/retrieval_augmented_generation.ipynb similarity index 99% rename from examples/retrieval_augmented_generation/wikipedia.ipynb rename to docs/examples/retrieval_augmented_generation.ipynb index 0618b400..5a056107 100644 --- a/examples/retrieval_augmented_generation/wikipedia.ipynb +++ b/docs/examples/retrieval_augmented_generation.ipynb @@ -4,6 +4,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "# Retrieval Augmented Generation\n", + "\n", "This notebook shows how to perform [Retrieval Augmented Generation (RAG)](https://arxiv.org/abs/2005.11401) using `magentic` and the `wikipedia` API. Essentially providing context to the LLM which it can use when generating its response. This approach allows us to insert new or private information that was not present in the model's training data. The Wikipedia API is used here for demonstration but the methods shown are applicable to any data source." ] }, diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..89706056 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,125 @@ +# Overview + +Easily integrate Large Language Models into your Python code. Simply use the `@prompt` decorator to create functions that return structured output from the LLM. Mix LLM queries and function calling with regular Python code to create complex logic. + +`magentic` is + +- **Compact:** Query LLMs without duplicating boilerplate code. +- **Atomic:** Prompts are functions that can be individually tested and reasoned about. +- **Transparent:** Create "chains" using regular Python code. Define all of your own prompts. +- **Compatible:** Use `@prompt` functions as normal functions, including with decorators like `@lru_cache`. +- **Type Annotated:** Works with linters and IDEs. + +## Installation + +```sh +pip install magentic +``` + +or using poetry + +```sh +poetry add magentic +``` + +Configure your OpenAI API key by setting the `OPENAI_API_KEY` environment variable or using `openai.api_key = "sk-..."`. See the [OpenAI Python library documentation](https://github.com/openai/openai-python#usage) for more information. + +## Usage + +The `@prompt` decorator allows you to define a template for a Large Language Model (LLM) prompt as a Python function. When this function is called, the arguments are inserted into the template, then this prompt is sent to an LLM which generates the function output. + +```python +from magentic import prompt + + +@prompt('Add more "dude"ness to: {phrase}') +def dudeify(phrase: str) -> str: + ... # No function body as this is never executed + + +dudeify("Hello, how are you?") +# "Hey, dude! What's up? How's it going, my man?" +``` + +The `@prompt` decorator will respect the return type annotation of the decorated function. 
This can be [any type supported by pydantic](https://docs.pydantic.dev/latest/usage/types/types/) including a `pydantic` model.
+
+```python
+from magentic import prompt
+from pydantic import BaseModel
+
+
+class Superhero(BaseModel):
+    name: str
+    age: int
+    power: str
+    enemies: list[str]
+
+
+@prompt("Create a Superhero named {name}.")
+def create_superhero(name: str) -> Superhero:
+    ...
+
+
+create_superhero("Garden Man")
+# Superhero(name='Garden Man', age=30, power='Control over plants', enemies=['Pollution Man', 'Concrete Woman'])
+```
+
+An LLM can also decide to call functions. In this case the `@prompt`-decorated function returns a `FunctionCall` object which can be called to execute the function using the arguments provided by the LLM.
+
+```python
+from typing import Literal
+
+from magentic import prompt, FunctionCall
+
+
+def activate_oven(temperature: int, mode: Literal["broil", "bake", "roast"]) -> str:
+    """Turn the oven on with the provided settings."""
+    return f"Preheating to {temperature} F with mode {mode}"
+
+
+@prompt(
+    "Prepare the oven so I can make {food}",
+    functions=[activate_oven],
+)
+def configure_oven(food: str) -> FunctionCall[str]:
+    ...
+
+
+output = configure_oven("cookies!")
+# FunctionCall(<function activate_oven at 0x...>, temperature=350, mode='bake')
+output()
+# 'Preheating to 350 F with mode bake'
+```
+
+Sometimes the LLM requires making one or more function calls to generate a final answer. The `@prompt_chain` decorator will resolve `FunctionCall` objects automatically and pass the output back to the LLM to continue until the final answer is reached.
+
+In the following example, when `describe_weather` is called the LLM first calls the `get_current_weather` function, then uses the result to formulate its final answer, which is returned.
+
+```python
+from magentic import prompt_chain
+
+
+def get_current_weather(location, unit="fahrenheit"):
+    """Get the current weather in a given location"""
+    # Pretend to query an API
+    return {
+        "location": location,
+        "temperature": "72",
+        "unit": unit,
+        "forecast": ["sunny", "windy"],
+    }
+
+
+@prompt_chain(
+    "What's the weather like in {city}?",
+    functions=[get_current_weather],
+)
+def describe_weather(city: str) -> str:
+    ...
+
+
+describe_weather("Boston")
+# 'The current weather in Boston is 72°F and it is sunny and windy.'
+```
+
+LLM-powered functions created using `@prompt` and `@prompt_chain` can be supplied as `functions` to other `@prompt`/`@prompt_chain` decorators, just like regular Python functions. This enables increasingly complex LLM-powered functionality, while allowing individual components to be tested and improved in isolation.
diff --git a/docs/streaming.md b/docs/streaming.md
new file mode 100644
index 00000000..977c4827
--- /dev/null
+++ b/docs/streaming.md
@@ -0,0 +1,83 @@
+# Streaming
+
+The `StreamedStr` (and `AsyncStreamedStr`) class can be used to stream the output of the LLM. This allows you to process the text while it is being generated, rather than receiving the whole output at once.
+
+```python
+from magentic import prompt, StreamedStr
+
+
+@prompt("Tell me about {country}")
+def describe_country(country: str) -> StreamedStr:
+    ...
+
+
+# Print the chunks while they are being received
+for chunk in describe_country("Brazil"):
+    print(chunk, end="")
+# 'Brazil, officially known as the Federative Republic of Brazil, is ...'
+```
+
+Multiple `StreamedStr` can be created at the same time to stream LLM outputs concurrently.
In the below example, generating the description for multiple countries takes approximately the same amount of time as for a single country. + +```python +from time import time + +countries = ["Australia", "Brazil", "Chile"] + + +# Generate the descriptions one at a time +start_time = time() +for country in countries: + # Converting `StreamedStr` to `str` blocks until the LLM output is fully generated + description = str(describe_country(country)) + print(f"{time() - start_time:.2f}s : {country} - {len(description)} chars") + +# 22.72s : Australia - 2130 chars +# 41.63s : Brazil - 1884 chars +# 74.31s : Chile - 2968 chars + + +# Generate the descriptions concurrently by creating the StreamedStrs at the same time +start_time = time() +streamed_strs = [describe_country(country) for country in countries] +for country, streamed_str in zip(countries, streamed_strs): + description = str(streamed_str) + print(f"{time() - start_time:.2f}s : {country} - {len(description)} chars") + +# 22.79s : Australia - 2147 chars +# 23.64s : Brazil - 2202 chars +# 24.67s : Chile - 2186 chars +``` + +## Object Streaming + +Structured outputs can also be streamed from the LLM by using the return type annotation `Iterable` (or `AsyncIterable`). This allows each item to be processed while the next one is being generated. See the example in examples/quiz for how this can be used to improve user experience by quickly displaying/using the first item returned. + +```python +from collections.abc import Iterable +from time import time + +from magentic import prompt +from pydantic import BaseModel + + +class Superhero(BaseModel): + name: str + age: int + power: str + enemies: list[str] + + +@prompt("Create a Superhero team named {name}.") +def create_superhero_team(name: str) -> Iterable[Superhero]: + ... + + +start_time = time() +for hero in create_superhero_team("The Food Dudes"): + print(f"{time() - start_time:.2f}s : {hero}") + +# 2.23s : name='Pizza Man' age=30 power='Can shoot pizza slices from his hands' enemies=['The Hungry Horde', 'The Junk Food Gang'] +# 4.03s : name='Captain Carrot' age=35 power='Super strength and agility from eating carrots' enemies=['The Sugar Squad', 'The Greasy Gang'] +# 6.05s : name='Ice Cream Girl' age=25 power='Can create ice cream out of thin air' enemies=['The Hot Sauce Squad', 'The Healthy Eaters'] +``` diff --git a/docs/type-checking.md b/docs/type-checking.md new file mode 100644 index 00000000..c5e3d66f --- /dev/null +++ b/docs/type-checking.md @@ -0,0 +1,22 @@ +# Type Checking + +Many type checkers will raise warnings or errors for functions with the `@prompt` decorator due to the function having no body or return value. There are several ways to deal with these. + +1. Disable the check globally for the type checker. For example in mypy by disabling error code `empty-body`. + ```toml + # pyproject.toml + [tool.mypy] + disable_error_code = ["empty-body"] + ``` +1. Make the function body `...` (this does not satisfy mypy) or `raise`. + ```python + @prompt("Choose a color") + def random_color() -> str: + ... + ``` +1. Use comment `# type: ignore[empty-body]` on each function. In this case you can add a docstring instead of `...`. 
+ ```python + @prompt("Choose a color") + def random_color() -> str: # type: ignore[empty-body] + """Returns a random color.""" + ``` diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..52c1f577 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,85 @@ +site_name: Magentic +site_description: Seamlessly integrate LLMs as Python functions +strict: true +site_url: https://magentic.dev/ + +repo_name: jackmpcollins/magentic +repo_url: https://github.com/jackmpcollins/magentic +edit_uri: blob/main/docs/ + +theme: + name: material + palette: + - media: "(prefers-color-scheme)" + - media: "(prefers-color-scheme: light)" + scheme: default + primary: purple + - media: "(prefers-color-scheme: dark)" + scheme: slate + primary: purple + accent: yellow + features: + - content.action.edit + - content.code.copy + - content.code.select + - content.tooltips + - navigation.expand + - navigation.instant + - navigation.instant.prefetch + - navigation.instant.progress + - search.suggest + +plugins: + - mkdocs-jupyter: + # ignore_h1_titles: true + execute: false + +markdown_extensions: + - tables + - toc: + permalink: true + title: Page contents + - admonition + - pymdownx.details + - pymdownx.highlight: + pygments_lang_class: true + - pymdownx.extra + +extra_css: + - css/jupyter-notebook.css + +extra: + analytics: + provider: google + property: G-CWJ8LL02SG + feedback: + title: Was this page helpful? + ratings: + - icon: octicons/thumbsdown-16 + name: This page could be improved + data: 0 + note: >- + Thanks for your feedback! + - icon: octicons/thumbsup-16 + name: This page was helpful + data: 1 + note: >- + Thanks for your feedback! + +# https://www.mkdocs.org/user-guide/configuration/#validation +validation: + omitted_files: warn + absolute_links: warn + unrecognized_links: warn + +nav: + - Overview: index.md + - chat-prompting.md + - asyncio.md + - streaming.md + - configuration.md + - type-checking.md + - Examples: + - examples/registering_custom_type.ipynb + - examples/retrieval_augmented_generation.ipynb + - examples/chain_of_verification.ipynb diff --git a/poetry.lock b/poetry.lock index 4d8ff2fd..36ace957 100644 --- a/poetry.lock +++ b/poetry.lock @@ -523,7 +523,7 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, @@ -882,6 +882,23 @@ smb = ["smbprotocol"] ssh = ["paramiko"] tqdm = ["tqdm"] +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." 
+optional = false +python-versions = "*" +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + [[package]] name = "h11" version = "0.14.0" @@ -1477,6 +1494,35 @@ files = [ {file = "jupyterlab_widgets-3.0.10.tar.gz", hash = "sha256:04f2ac04976727e4f9d0fa91cdc2f1ab860f965e504c29dbd6a65c882c9d04c0"}, ] +[[package]] +name = "jupytext" +version = "1.16.1" +description = "Jupyter notebooks as Markdown documents, Julia, Python or R scripts" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupytext-1.16.1-py3-none-any.whl", hash = "sha256:796ec4f68ada663569e5d38d4ef03738a01284bfe21c943c485bc36433898bd0"}, + {file = "jupytext-1.16.1.tar.gz", hash = "sha256:68c7b68685e870e80e60fda8286fbd6269e9c74dc1df4316df6fe46eabc94c99"}, +] + +[package.dependencies] +markdown-it-py = ">=1.0" +mdit-py-plugins = "*" +nbformat = "*" +packaging = "*" +pyyaml = "*" +toml = "*" + +[package.extras] +dev = ["jupytext[test-cov,test-external]"] +docs = ["myst-parser", "sphinx", "sphinx-copybutton", "sphinx-rtd-theme"] +test = ["pytest", "pytest-randomly", "pytest-xdist"] +test-cov = ["jupytext[test-integration]", "pytest-cov (>=2.6.1)"] +test-external = ["autopep8", "black", "flake8", "gitpython", "isort", "jupyter-fs (<0.4.0)", "jupytext[test-integration]", "pre-commit", "sphinx-gallery (<0.8)"] +test-functional = ["jupytext[test]"] +test-integration = ["ipykernel", "jupyter-server (!=2.11)", "jupytext[test-functional]", "nbconvert"] +test-ui = ["calysto-bash"] + [[package]] name = "litellm" version = "1.26.0" @@ -1503,6 +1549,21 @@ tokenizers = "*" extra-proxy = ["streamlit (>=1.29.0,<2.0.0)"] proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "fastapi (>=0.104.1,<0.105.0)", "fastapi-sso (>=0.10.0,<0.11.0)", "gunicorn (>=21.2.0,<22.0.0)", "orjson (>=3.9.7,<4.0.0)", "python-multipart (>=0.0.6,<0.0.7)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.22.0,<0.23.0)"] +[[package]] +name = "markdown" +version = "3.5.2" +description = "Python implementation of John Gruber's Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"}, + {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"}, +] + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + [[package]] name = "markdown-it-py" version = "3.0.0" @@ -1610,6 +1671,25 @@ files = [ [package.dependencies] traitlets = "*" +[[package]] +name = "mdit-py-plugins" +version = "0.4.0" +description = "Collection of plugins for markdown-it-py" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mdit_py_plugins-0.4.0-py3-none-any.whl", hash = "sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9"}, + {file = "mdit_py_plugins-0.4.0.tar.gz", hash = "sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b"}, +] + +[package.dependencies] +markdown-it-py = ">=1.0.0,<4.0.0" + +[package.extras] +code-style = ["pre-commit"] +rtd = ["myst-parser", "sphinx-book-theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "mdurl" version = "0.1.2" @@ -1621,6 +1701,17 @@ files = [ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +optional = false +python-versions = ">=3.6" +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + [[package]] name = "mistune" version = "3.0.2" @@ -1632,6 +1723,95 @@ files = [ {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, ] +[[package]] +name = "mkdocs" +version = "1.5.3" +description = "Project documentation with Markdown." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs-1.5.3-py3-none-any.whl", hash = "sha256:3b3a78e736b31158d64dbb2f8ba29bd46a379d0c6e324c2246c3bc3d2189cfc1"}, + {file = "mkdocs-1.5.3.tar.gz", hash = "sha256:eb7c99214dcb945313ba30426c2451b735992c73c2e10838f76d09e39ff4d0e2"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +jinja2 = ">=2.11.1" +markdown = ">=3.2.1" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +packaging = ">=20.5" +pathspec = ">=0.11.1" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pathspec (==0.11.1)", "platformdirs (==2.2.0)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-jupyter" +version = "0.24.6" +description = "Use Jupyter in mkdocs websites" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mkdocs_jupyter-0.24.6-py3-none-any.whl", hash = "sha256:56fb7ad796f2414a4143d54a966b805caf315c32413e97f85591623fa87dceca"}, + {file = "mkdocs_jupyter-0.24.6.tar.gz", hash = "sha256:89fcbe8a9523864d5416de1a60711640b6bc2972279d2adf46ed2776c2d9ff7c"}, +] + +[package.dependencies] +ipykernel = ">6.0.0,<7.0.0" +jupytext = ">1.13.8,<2" +mkdocs = ">=1.4.0,<2" +mkdocs-material = ">9.0.0" +nbconvert = ">=7.2.9,<8" +pygments = ">2.12.0" + +[[package]] +name = "mkdocs-material" +version = "9.5.11" +description = "Documentation that simply works" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_material-9.5.11-py3-none-any.whl", hash = "sha256:788ee0f3e036dca2dc20298d65e480297d348a44c9d7b2ee05c5262983e66072"}, + {file = "mkdocs_material-9.5.11.tar.gz", hash = "sha256:7af7f8af0dea16175558f3fb9245d26c83a17199baa5f157755e63d7437bf971"}, +] + +[package.dependencies] +babel = ">=2.10,<3.0" +colorama = ">=0.4,<1.0" +jinja2 = ">=3.0,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.5.3,<1.6.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +regex = ">=2022.4" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + [[package]] name = "multidict" version = "6.0.5" @@ -2010,6 +2190,16 @@ files = [ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] +[[package]] +name = "paginate" +version = "0.5.6" +description = "Divides large result sets into pages for easier browsing" +optional = false +python-versions = "*" +files = [ + {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, +] + [[package]] name = "pandas" version = "2.2.1" @@ -2109,6 +2299,17 @@ files = [ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] testing = ["docopt", "pytest (<6.0.0)"] +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + [[package]] name = "pexpect" version = "4.9.0" @@ -2400,6 +2601,24 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pymdown-extensions" +version = "10.7" +description = "Extension pack for Python Markdown." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, + {file = "pymdown_extensions-10.7.tar.gz", hash = "sha256:c0d64d5cf62566f59e6b2b690a4095c931107c250a8c8e1351c1de5f6b036deb"}, +] + +[package.dependencies] +markdown = ">=3.5" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.12)"] + [[package]] name = "pytest" version = "8.0.1" @@ -2610,6 +2829,20 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "pyyaml-env-tag" +version = "0.1" +description = "A custom YAML tag for referencing environment variables in YAML files. " +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, + {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, +] + +[package.dependencies] +pyyaml = "*" + [[package]] name = "pyzmq" version = "25.1.2" @@ -2776,7 +3009,7 @@ rpds-py = ">=0.7.0" name = "regex" version = "2023.12.25" description = "Alternative regular expression module, to replace re." 
-optional = true +optional = false python-versions = ">=3.7" files = [ {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0694219a1d54336fd0445ea382d49d36882415c0134ee1e8332afd1529f0baa5"}, @@ -3358,6 +3591,17 @@ dev = ["tokenizers[testing]"] docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.1" @@ -3488,6 +3732,47 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "watchdog" +version = "4.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = 
"watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + [[package]] name = "wcwidth" version = "0.2.13" @@ -3690,4 +3975,4 @@ litellm = ["litellm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<4.0" -content-hash = "97d46a001911c31a35e39c25e4c77cc775de9883386dfcf23b7a8af1a9a57488" +content-hash = "37812979d0ed26933826034dac7fb0dd6e91b4004e2c9804d21ba98d466f39dd" diff --git a/pyproject.toml b/pyproject.toml index 21f50c83..dc4690f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,11 @@ pytest-clarity = "*" pytest-cov = "*" ruff = "*" +[tool.poetry.group.docs.dependencies] +mkdocs = "^1.5.3" +mkdocs-jupyter = "^0.24.6" +mkdocs-material = "^9.5.11" + [tool.poetry.group.examples.dependencies] jupyter = "*" pandas = "^2.2.1" @@ -93,6 +98,9 @@ mark-parentheses = false known-first-party = ["magentic"] [tool.ruff.lint.per-file-ignores] +"docs/examples/*" = [ + "T20", # flake8-print +] "examples/*" = [ "T20", # flake8-print ]