From 2b2433e78ebad5d814605daaa648d0c31c3075cc Mon Sep 17 00:00:00 2001
From: Ivan Leo
Date: Thu, 28 Nov 2024 12:59:26 +0800
Subject: [PATCH 1/2] feat: added cortex documentation

---
 docs/integrations/cortex.md | 178 ++++++++++++++++++++++++++++++++++++
 docs/integrations/index.md  |   1 +
 mkdocs.yml                  |   1 +
 3 files changed, 180 insertions(+)
 create mode 100644 docs/integrations/cortex.md

diff --git a/docs/integrations/cortex.md b/docs/integrations/cortex.md
new file mode 100644
index 000000000..0c1d5479a
--- /dev/null
+++ b/docs/integrations/cortex.md
@@ -0,0 +1,178 @@
+---
+title: "Structured outputs with Cortex, a complete guide w/ instructor"
+description: "Learn how to use Cortex with Instructor for structured outputs. Complete guide with examples and best practices."
+---
+
+# Structured outputs with Cortex
+
+Cortex.cpp is a runtime that helps you run open source LLMs out of the box. It supports a wide variety of models and powers the [Jan](https://jan.ai) platform. This guide provides a quickstart on how to use Cortex with instructor for structured outputs.
+
+## Quick Start
+
+Instructor comes with support for the OpenAI client out of the box, so you don't need to install anything extra.
+
+```bash
+pip install "instructor"
+```
+
+Once you've done so, make sure to pull the model that you'd like to use. In this example, we'll be using a quantized llama3.2 model.
+
+```bash
+cortex run llama3.2:3b-gguf-q4-km
+```
+
+Let's start by initializing the client below - note that we need to provide a base URL and an API key here. The API key isn't important, it's just so the OpenAI client doesn't throw an error.
+
+```python
+import openai
+from instructor import from_openai
+
+client = from_openai(
+    openai.OpenAI(
+        base_url="http://localhost:39281/v1",
+        api_key="this is a fake api key that doesn't matter",
+    )
+)
+```
+
+## Simple User Example (Sync)
+
+```python
+from instructor import from_openai
+from pydantic import BaseModel
+import openai
+
+client = from_openai(
+    openai.OpenAI(
+        base_url="http://localhost:39281/v1",
+        api_key="this is a fake api key that doesn't matter",
+    )
+)
+
+
+class User(BaseModel):
+    name: str
+    age: int
+
+
+resp = client.chat.completions.create(
+    model="llama3.2:3b-gguf-q4-km",
+    messages=[{"role": "user", "content": "Ivan is 27 and lives in Singapore"}],
+    response_model=User,
+)
+
+print(resp)
+# > name='Ivan', age=27
+```
+
+## Simple User Example (Async)
+
+```python
+import os
+from openai import AsyncOpenAI
+import instructor
+from pydantic import BaseModel
+import asyncio
+
+# Initialize with API key
+client = AsyncOpenAI(api_key=os.getenv('OPENAI_API_KEY'))
+
+# Enable instructor patches for async OpenAI client
+client = instructor.from_openai(client)
+
+class User(BaseModel):
+    name: str
+    age: int
+
+async def extract_user():
+    user = await client.chat.completions.create(
+        model="llama3.2:3b-gguf-q4-km",
+        messages=[
+            {"role": "user", "content": "Extract: Jason is 25 years old"},
+        ],
+        response_model=User,
+    )
+    return user
+
+# Run async function
+user = asyncio.run(extract_user())
+print(user)
+#> User(name='Jason', age=25)
+```
+
+## Nested Example
+
+```python
+from instructor import from_openai
+from pydantic import BaseModel
+import openai
+
+client = from_openai(
+    openai.OpenAI(
+        base_url="http://localhost:39281/v1",
+        api_key="this is a fake api key that doesn't matter",
+    )
+)
+
+
+class Address(BaseModel):
+    street: str
+    city: str
+    country: str
+
+
+class User(BaseModel):
+    name: str
+    age: int
+    addresses: list[Address]
+
+
+user = client.chat.completions.create(
+    model="llama3.2:3b-gguf-q4-km",
+    messages=[
+        {
+            "role": "user",
+            "content": """
+                Extract: Jason is 25 years old.
+                He lives at 123 Main St, New York, USA
+                and has a summer house at 456 Beach Rd, Miami, USA
+            """,
+        },
+    ],
+    response_model=User,
+)
+
+print(user)
+
+#> {
+#>     'name': 'Jason',
+#>     'age': 25,
+#>     'addresses': [
+#>         {
+#>             'street': '123 Main St',
+#>             'city': 'New York',
+#>             'country': 'USA'
+#>         },
+#>         {
+#>             'street': '456 Beach Rd',
+#>             'city': 'Miami',
+#>             'country': 'USA'
+#>         }
+#>     ]
+#> }
+```
+
+In this tutorial we've seen how we can run local models with Cortex while simplifying a lot of the logic around managing retries and function calling with our simple interface.
+
+We'll be publishing a lot more content on Cortex and how to work with local models moving forward so do keep an eye out for that.
+
+## Related Resources
+
+- [Cortex Documentation](https://cortex.so/docs/)
+- [Instructor Core Concepts](../concepts/index.md)
+- [Type Validation Guide](../concepts/validation.md)
+- [Advanced Usage Examples](../examples/index.md)
+
+## Updates and Compatibility
+
+Instructor maintains compatibility with the latest OpenAI API versions and models. Check the [changelog](https://github.com/jxnl/instructor/blob/main/CHANGELOG.md) for updates.
diff --git a/docs/integrations/index.md b/docs/integrations/index.md
index 28414dac0..ff4336505 100644
--- a/docs/integrations/index.md
+++ b/docs/integrations/index.md
@@ -16,6 +16,7 @@ Instructor supports a wide range of AI model providers, each with their own capa
 - [Ollama](./ollama.md) - Run open-source models locally
 - [llama-cpp-python](./llama-cpp-python.md) - Python bindings for llama.cpp
 - [Together AI](./together.md) - Host and run open source models
+- [Cortex](./cortex.md) - Run open source models with Cortex
 
 ### Cloud AI Providers
 
diff --git a/mkdocs.yml b/mkdocs.yml
index 7ec4063ea..75d5251e3 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -213,6 +213,7 @@ nav:
   - Azure OpenAI: 'integrations/azure.md'
   - Cerebras: 'integrations/cerebras.md'
   - Cohere: 'integrations/cohere.md'
+  - Cortex: 'integrations/cortex.md'
   - Fireworks: 'integrations/fireworks.md'
   - Gemini: 'integrations/google.md'
   - Groq: 'integrations/groq.md'

From d66fa965b8eac1ab4efcb95140ec1e40e535eeb9 Mon Sep 17 00:00:00 2001
From: Ivan Leo
Date: Thu, 28 Nov 2024 13:10:50 +0800
Subject: [PATCH 2/2] fix: updated import

---
 docs/integrations/cortex.md | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/docs/integrations/cortex.md b/docs/integrations/cortex.md
index 0c1d5479a..46d1c6782 100644
--- a/docs/integrations/cortex.md
+++ b/docs/integrations/cortex.md
@@ -75,10 +75,12 @@ from pydantic import BaseModel
 import asyncio
 
 # Initialize with API key
-client = AsyncOpenAI(api_key=os.getenv('OPENAI_API_KEY'))
-
-# Enable instructor patches for async OpenAI client
-client = instructor.from_openai(client)
+client = instructor.from_openai(
+    AsyncOpenAI(
+        base_url="http://localhost:39281/v1",
+        api_key="this is a fake api key that doesn't matter",
+    )
+)
 
 class User(BaseModel):
     name: str