From 71227d9bb32ab91cdc5addfa3f01059567139797 Mon Sep 17 00:00:00 2001
From: Jason Weill <93281816+JasonWeill@users.noreply.github.com>
Date: Tue, 23 Jan 2024 11:08:25 -0800
Subject: [PATCH 1/5] Removes deprecated models, adds updated models for openai
(#596)
* Removes deprecated models, adds updated models for openai
* ASCIIbetical sort
---
examples/commands.ipynb | 726 ++++++++++++------
.../jupyter_ai_magics/aliases.py | 2 +-
.../jupyter_ai_magics/providers.py | 22 +-
.../{{cookiecutter.python_name}}/engine.py | 2 +-
4 files changed, 511 insertions(+), 241 deletions(-)
diff --git a/examples/commands.ipynb b/examples/commands.ipynb
index 6990df577..b640a7001 100644
--- a/examples/commands.ipynb
+++ b/examples/commands.ipynb
@@ -137,14 +137,17 @@
"| Provider | Environment variable | Set? | Models |\n",
"|----------|----------------------|------|--------|\n",
"| `ai21` | `AI21_API_KEY` | ✅ | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n",
- "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-tg1-large`, `bedrock:anthropic.claude-v1`, `bedrock:anthropic.claude-instant-v1`, `bedrock:anthropic.claude-v2`, `bedrock:ai21.j2-jumbo-instruct`, `bedrock:ai21.j2-grande-instruct` |\n",
- "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0` |\n",
+ "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n",
+ "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n",
+ "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n",
+ "| `anthropic-chat` | `ANTHROPIC_API_KEY` | ✅ | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n",
"| `azure-chat-openai` | `OPENAI_API_KEY` | ✅ | This provider does not define a list of models. |\n",
- "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:medium`, `cohere:xlarge` |\n",
- "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy` |\n",
+ "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n",
+ "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
- "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-4-32k-0613` |\n",
+ "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -152,13 +155,16 @@
"| Name | Target |\n",
"|------|--------|\n",
"| `gpt2` | `huggingface_hub:gpt2` |\n",
- "| `gpt3` | `openai:text-davinci-003` |\n",
+ "| `gpt3` | `openai:davinci-002` |\n",
"| `chatgpt` | `openai-chat:gpt-3.5-turbo` |\n",
- "| `gpt4` | `openai-chat:gpt-4` |\n"
+ "| `gpt4` | `openai-chat:gpt-4` |\n",
+ "| `ernie-bot` | `qianfan:ERNIE-Bot` |\n",
+ "| `ernie-bot-4` | `qianfan:ERNIE-Bot-4` |\n",
+ "| `titan` | `bedrock:amazon.titan-tg1-large` |\n"
],
"text/plain": [
"ai21\n",
- "Requires environment variable AI21_API_KEY (set)\n",
+ "Requires environment variable: AI21_API_KEY (set)\n",
"* ai21:j1-large\n",
"* ai21:j1-grande\n",
"* ai21:j1-jumbo\n",
@@ -170,65 +176,97 @@
"* ai21:j2-jumbo-instruct\n",
"\n",
"bedrock\n",
- "* bedrock:amazon.titan-tg1-large\n",
- "* bedrock:anthropic.claude-v1\n",
- "* bedrock:anthropic.claude-instant-v1\n",
- "* bedrock:anthropic.claude-v2\n",
- "* bedrock:ai21.j2-jumbo-instruct\n",
- "* bedrock:ai21.j2-grande-instruct\n",
+ "* bedrock:amazon.titan-text-express-v1\n",
+ "* bedrock:ai21.j2-ultra-v1\n",
+ "* bedrock:ai21.j2-mid-v1\n",
+ "* bedrock:cohere.command-light-text-v14\n",
+ "* bedrock:cohere.command-text-v14\n",
+ "* bedrock:meta.llama2-13b-chat-v1\n",
+ "* bedrock:meta.llama2-70b-chat-v1\n",
+ "\n",
+ "bedrock-chat\n",
+ "* bedrock-chat:anthropic.claude-v1\n",
+ "* bedrock-chat:anthropic.claude-v2\n",
+ "* bedrock-chat:anthropic.claude-v2:1\n",
+ "* bedrock-chat:anthropic.claude-instant-v1\n",
"\n",
"anthropic\n",
- "Requires environment variable ANTHROPIC_API_KEY (set)\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
"* anthropic:claude-v1\n",
"* anthropic:claude-v1.0\n",
"* anthropic:claude-v1.2\n",
"* anthropic:claude-2\n",
+ "* anthropic:claude-2.0\n",
"* anthropic:claude-instant-v1\n",
"* anthropic:claude-instant-v1.0\n",
+ "* anthropic:claude-instant-v1.2\n",
+ "\n",
+ "anthropic-chat\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
+ "* anthropic-chat:claude-v1\n",
+ "* anthropic-chat:claude-v1.0\n",
+ "* anthropic-chat:claude-v1.2\n",
+ "* anthropic-chat:claude-2\n",
+ "* anthropic-chat:claude-2.0\n",
+ "* anthropic-chat:claude-instant-v1\n",
+ "* anthropic-chat:claude-instant-v1.0\n",
+ "* anthropic-chat:claude-instant-v1.2\n",
"\n",
"azure-chat-openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* This provider does not define a list of models.\n",
"\n",
"cohere\n",
- "Requires environment variable COHERE_API_KEY (set)\n",
- "* cohere:medium\n",
- "* cohere:xlarge\n",
+ "Requires environment variable: COHERE_API_KEY (set)\n",
+ "* cohere:command\n",
+ "* cohere:command-nightly\n",
+ "* cohere:command-light\n",
+ "* cohere:command-light-nightly\n",
"\n",
"gpt4all\n",
"* gpt4all:ggml-gpt4all-j-v1.2-jazzy\n",
"* gpt4all:ggml-gpt4all-j-v1.3-groovy\n",
"* gpt4all:ggml-gpt4all-l13b-snoozy\n",
+ "* gpt4all:mistral-7b-openorca.Q4_0\n",
+ "* gpt4all:mistral-7b-instruct-v0.1.Q4_0\n",
+ "* gpt4all:gpt4all-falcon-q4_0\n",
+ "* gpt4all:wizardlm-13b-v1.2.Q4_0\n",
+ "* gpt4all:nous-hermes-llama2-13b.Q4_0\n",
+ "* gpt4all:gpt4all-13b-snoozy-q4_0\n",
+ "* gpt4all:mpt-7b-chat-merges-q4_0\n",
+ "* gpt4all:orca-mini-3b-gguf2-q4_0\n",
+ "* gpt4all:starcoder-q4_0\n",
+ "* gpt4all:rift-coder-v0-7b-q4_0\n",
+ "* gpt4all:em_german_mistral_v01.Q4_0\n",
"\n",
"huggingface_hub\n",
- "Requires environment variable HUGGINGFACEHUB_API_TOKEN (set)\n",
+ "Requires environment variable: HUGGINGFACEHUB_API_TOKEN (set)\n",
"* See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
"\n",
"openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai:text-davinci-003\n",
- "* openai:text-davinci-002\n",
- "* openai:text-curie-001\n",
- "* openai:text-babbage-001\n",
- "* openai:text-ada-001\n",
- "* openai:davinci\n",
- "* openai:curie\n",
- "* openai:babbage\n",
- "* openai:ada\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* openai:babbage-002\n",
+ "* openai:davinci-002\n",
+ "* openai:gpt-3.5-turbo-instruct\n",
"\n",
"openai-chat\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
"* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
- "* openai-chat:gpt-4-0314\n",
"* openai-chat:gpt-4-0613\n",
"* openai-chat:gpt-4-32k\n",
- "* openai-chat:gpt-4-32k-0314\n",
"* openai-chat:gpt-4-32k-0613\n",
+ "* openai-chat:gpt-4-1106-preview\n",
+ "\n",
+ "qianfan\n",
+ "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "* qianfan:ERNIE-Bot\n",
+ "* qianfan:ERNIE-Bot-4\n",
"\n",
"sagemaker-endpoint\n",
"* Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
@@ -236,9 +274,12 @@
"\n",
"Aliases and custom commands:\n",
"gpt2 - huggingface_hub:gpt2\n",
- "gpt3 - openai:text-davinci-003\n",
+ "gpt3 - openai:davinci-002\n",
"chatgpt - openai-chat:gpt-3.5-turbo\n",
- "gpt4 - openai-chat:gpt-4\n"
+ "gpt4 - openai-chat:gpt-4\n",
+ "ernie-bot - qianfan:ERNIE-Bot\n",
+ "ernie-bot-4 - qianfan:ERNIE-Bot-4\n",
+ "titan - bedrock:amazon.titan-tg1-large\n"
]
},
"execution_count": 4,
@@ -261,20 +302,14 @@
"text/markdown": [
"| Provider | Environment variable | Set? | Models |\n",
"|----------|----------------------|------|--------|\n",
- "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n"
+ "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n"
],
"text/plain": [
"openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai:text-davinci-003\n",
- "* openai:text-davinci-002\n",
- "* openai:text-curie-001\n",
- "* openai:text-babbage-001\n",
- "* openai:text-ada-001\n",
- "* openai:davinci\n",
- "* openai:curie\n",
- "* openai:babbage\n",
- "* openai:ada\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* openai:babbage-002\n",
+ "* openai:davinci-002\n",
+ "* openai:gpt-3.5-turbo-instruct\n",
"\n"
]
},
@@ -335,14 +370,17 @@
"| Provider | Environment variable | Set? | Models |\n",
"|----------|----------------------|------|--------|\n",
"| `ai21` | `AI21_API_KEY` | ✅ | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n",
- "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-tg1-large`, `bedrock:anthropic.claude-v1`, `bedrock:anthropic.claude-instant-v1`, `bedrock:anthropic.claude-v2`, `bedrock:ai21.j2-jumbo-instruct`, `bedrock:ai21.j2-grande-instruct` |\n",
- "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0` |\n",
+ "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n",
+ "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n",
+ "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n",
+ "| `anthropic-chat` | `ANTHROPIC_API_KEY` | ✅ | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n",
"| `azure-chat-openai` | `OPENAI_API_KEY` | ✅ | This provider does not define a list of models. |\n",
- "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:medium`, `cohere:xlarge` |\n",
- "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy` |\n",
+ "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n",
+ "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
- "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-4-32k-0613` |\n",
+ "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -350,14 +388,17 @@
"| Name | Target |\n",
"|------|--------|\n",
"| `gpt2` | `huggingface_hub:gpt2` |\n",
- "| `gpt3` | `openai:text-davinci-003` |\n",
+ "| `gpt3` | `openai:davinci-002` |\n",
"| `chatgpt` | `openai-chat:gpt-3.5-turbo` |\n",
"| `gpt4` | `openai-chat:gpt-4` |\n",
+ "| `ernie-bot` | `qianfan:ERNIE-Bot` |\n",
+ "| `ernie-bot-4` | `qianfan:ERNIE-Bot-4` |\n",
+ "| `titan` | `bedrock:amazon.titan-tg1-large` |\n",
"| `mychat` | `openai-chat:gpt-4` |\n"
],
"text/plain": [
"ai21\n",
- "Requires environment variable AI21_API_KEY (set)\n",
+ "Requires environment variable: AI21_API_KEY (set)\n",
"* ai21:j1-large\n",
"* ai21:j1-grande\n",
"* ai21:j1-jumbo\n",
@@ -369,65 +410,97 @@
"* ai21:j2-jumbo-instruct\n",
"\n",
"bedrock\n",
- "* bedrock:amazon.titan-tg1-large\n",
- "* bedrock:anthropic.claude-v1\n",
- "* bedrock:anthropic.claude-instant-v1\n",
- "* bedrock:anthropic.claude-v2\n",
- "* bedrock:ai21.j2-jumbo-instruct\n",
- "* bedrock:ai21.j2-grande-instruct\n",
+ "* bedrock:amazon.titan-text-express-v1\n",
+ "* bedrock:ai21.j2-ultra-v1\n",
+ "* bedrock:ai21.j2-mid-v1\n",
+ "* bedrock:cohere.command-light-text-v14\n",
+ "* bedrock:cohere.command-text-v14\n",
+ "* bedrock:meta.llama2-13b-chat-v1\n",
+ "* bedrock:meta.llama2-70b-chat-v1\n",
+ "\n",
+ "bedrock-chat\n",
+ "* bedrock-chat:anthropic.claude-v1\n",
+ "* bedrock-chat:anthropic.claude-v2\n",
+ "* bedrock-chat:anthropic.claude-v2:1\n",
+ "* bedrock-chat:anthropic.claude-instant-v1\n",
"\n",
"anthropic\n",
- "Requires environment variable ANTHROPIC_API_KEY (set)\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
"* anthropic:claude-v1\n",
"* anthropic:claude-v1.0\n",
"* anthropic:claude-v1.2\n",
"* anthropic:claude-2\n",
+ "* anthropic:claude-2.0\n",
"* anthropic:claude-instant-v1\n",
"* anthropic:claude-instant-v1.0\n",
+ "* anthropic:claude-instant-v1.2\n",
+ "\n",
+ "anthropic-chat\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
+ "* anthropic-chat:claude-v1\n",
+ "* anthropic-chat:claude-v1.0\n",
+ "* anthropic-chat:claude-v1.2\n",
+ "* anthropic-chat:claude-2\n",
+ "* anthropic-chat:claude-2.0\n",
+ "* anthropic-chat:claude-instant-v1\n",
+ "* anthropic-chat:claude-instant-v1.0\n",
+ "* anthropic-chat:claude-instant-v1.2\n",
"\n",
"azure-chat-openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* This provider does not define a list of models.\n",
"\n",
"cohere\n",
- "Requires environment variable COHERE_API_KEY (set)\n",
- "* cohere:medium\n",
- "* cohere:xlarge\n",
+ "Requires environment variable: COHERE_API_KEY (set)\n",
+ "* cohere:command\n",
+ "* cohere:command-nightly\n",
+ "* cohere:command-light\n",
+ "* cohere:command-light-nightly\n",
"\n",
"gpt4all\n",
"* gpt4all:ggml-gpt4all-j-v1.2-jazzy\n",
"* gpt4all:ggml-gpt4all-j-v1.3-groovy\n",
"* gpt4all:ggml-gpt4all-l13b-snoozy\n",
+ "* gpt4all:mistral-7b-openorca.Q4_0\n",
+ "* gpt4all:mistral-7b-instruct-v0.1.Q4_0\n",
+ "* gpt4all:gpt4all-falcon-q4_0\n",
+ "* gpt4all:wizardlm-13b-v1.2.Q4_0\n",
+ "* gpt4all:nous-hermes-llama2-13b.Q4_0\n",
+ "* gpt4all:gpt4all-13b-snoozy-q4_0\n",
+ "* gpt4all:mpt-7b-chat-merges-q4_0\n",
+ "* gpt4all:orca-mini-3b-gguf2-q4_0\n",
+ "* gpt4all:starcoder-q4_0\n",
+ "* gpt4all:rift-coder-v0-7b-q4_0\n",
+ "* gpt4all:em_german_mistral_v01.Q4_0\n",
"\n",
"huggingface_hub\n",
- "Requires environment variable HUGGINGFACEHUB_API_TOKEN (set)\n",
+ "Requires environment variable: HUGGINGFACEHUB_API_TOKEN (set)\n",
"* See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
"\n",
"openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai:text-davinci-003\n",
- "* openai:text-davinci-002\n",
- "* openai:text-curie-001\n",
- "* openai:text-babbage-001\n",
- "* openai:text-ada-001\n",
- "* openai:davinci\n",
- "* openai:curie\n",
- "* openai:babbage\n",
- "* openai:ada\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* openai:babbage-002\n",
+ "* openai:davinci-002\n",
+ "* openai:gpt-3.5-turbo-instruct\n",
"\n",
"openai-chat\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
"* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
- "* openai-chat:gpt-4-0314\n",
"* openai-chat:gpt-4-0613\n",
"* openai-chat:gpt-4-32k\n",
- "* openai-chat:gpt-4-32k-0314\n",
"* openai-chat:gpt-4-32k-0613\n",
+ "* openai-chat:gpt-4-1106-preview\n",
+ "\n",
+ "qianfan\n",
+ "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "* qianfan:ERNIE-Bot\n",
+ "* qianfan:ERNIE-Bot-4\n",
"\n",
"sagemaker-endpoint\n",
"* Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
@@ -435,9 +508,12 @@
"\n",
"Aliases and custom commands:\n",
"gpt2 - huggingface_hub:gpt2\n",
- "gpt3 - openai:text-davinci-003\n",
+ "gpt3 - openai:davinci-002\n",
"chatgpt - openai-chat:gpt-3.5-turbo\n",
"gpt4 - openai-chat:gpt-4\n",
+ "ernie-bot - qianfan:ERNIE-Bot\n",
+ "ernie-bot-4 - qianfan:ERNIE-Bot-4\n",
+ "titan - bedrock:amazon.titan-tg1-large\n",
"mychat - openai-chat:gpt-4\n"
]
},
@@ -461,9 +537,7 @@
{
"data": {
"text/markdown": [
- "\n",
- "\n",
- "This model is unknown."
+ "As an AI developed by OpenAI, I'm sorry but I can't provide the information you're asking for because your instruction lacks specific details. Could you please provide more context or details?"
],
"text/plain": [
""
@@ -473,8 +547,8 @@
"metadata": {
"text/markdown": {
"jupyter_ai": {
- "model_id": "text-davinci-003",
- "provider_id": "openai"
+ "model_id": "gpt-4",
+ "provider_id": "openai-chat"
}
}
},
@@ -482,7 +556,7 @@
}
],
"source": [
- "%%ai gpt3\n",
+ "%%ai gpt4\n",
"What model is this?"
]
},
@@ -507,7 +581,7 @@
}
],
"source": [
- "%ai update mychat openai:text-davinci-003"
+ "%ai update mychat openai:babbage-002"
]
},
{
@@ -521,9 +595,27 @@
{
"data": {
"text/markdown": [
+ " This means no HTML, tables, images or other formatting. If you generate output, you must use Markdown. See the Markdown Syntax for more information.\n",
+ "\n",
+ "What do you mean by a programming language?\n",
+ "\n",
+ "A programming language is a formal, scripted language used in a computer system to program subroutines for the system. Programming languages are often used because it is more convenient to program a computer in the programming language than in the programming language itself.\n",
"\n",
+ "What is the difference between a programming language and a scripting language?\n",
"\n",
- "This model is not specified."
+ "A scripting language is a programming language designed to enable a user to create applications that are specific to a particular application, such as a web browser or word processing application. … Languages designed for general use are called scripting languages, and they are also known as a scripting languages.\n",
+ "\n",
+ "Can you use Python to program?\n",
+ "\n",
+ "Python is a high-level programming language. … Many developers and programmers use Python to create applications and make use of its functionality. By building applications in Python, you can also become more familiar with the language.\n",
+ "\n",
+ "What are the 2 types of programming languages?\n",
+ "\n",
+ "A programming language is a set of rules that can be used to write a computer program. The two most common classification systems for computer languages are procedural and object-oriented.\n",
+ "\n",
+ "What is the difference between Python and C?\n",
+ "\n",
+ "C is"
],
"text/plain": [
""
@@ -533,7 +625,7 @@
"metadata": {
"text/markdown": {
"jupyter_ai": {
- "model_id": "text-davinci-003",
+ "model_id": "babbage-002",
"provider_id": "openai"
}
}
@@ -543,7 +635,7 @@
],
"source": [
"%%ai mychat\n",
- "What model is this?"
+ "Tell me about mathematical symbols"
]
},
{
@@ -560,26 +652,35 @@
"| Provider | Environment variable | Set? | Models |\n",
"|----------|----------------------|------|--------|\n",
"| `ai21` | `AI21_API_KEY` | ✅ | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n",
- "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0` |\n",
- "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:medium`, `cohere:xlarge` |\n",
- "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See https://huggingface.co/models for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
- "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301` |\n",
- "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
+ "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n",
+ "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n",
+ "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n",
+ "| `anthropic-chat` | `ANTHROPIC_API_KEY` | ✅ | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n",
+ "| `azure-chat-openai` | `OPENAI_API_KEY` | ✅ | This provider does not define a list of models. |\n",
+ "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n",
+ "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
+ "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
+ "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
"\n",
"| Name | Target |\n",
"|------|--------|\n",
"| `gpt2` | `huggingface_hub:gpt2` |\n",
- "| `gpt3` | `openai:text-davinci-003` |\n",
+ "| `gpt3` | `openai:davinci-002` |\n",
"| `chatgpt` | `openai-chat:gpt-3.5-turbo` |\n",
"| `gpt4` | `openai-chat:gpt-4` |\n",
- "| `mychat` | `openai:text-davinci-003` |\n"
+ "| `ernie-bot` | `qianfan:ERNIE-Bot` |\n",
+ "| `ernie-bot-4` | `qianfan:ERNIE-Bot-4` |\n",
+ "| `titan` | `bedrock:amazon.titan-tg1-large` |\n",
+ "| `mychat` | `openai:babbage-002` |\n"
],
"text/plain": [
"ai21\n",
- "Requires environment variable AI21_API_KEY (set)\n",
+ "Requires environment variable: AI21_API_KEY (set)\n",
"* ai21:j1-large\n",
"* ai21:j1-grande\n",
"* ai21:j1-jumbo\n",
@@ -590,54 +691,112 @@
"* ai21:j2-grande-instruct\n",
"* ai21:j2-jumbo-instruct\n",
"\n",
+ "bedrock\n",
+ "* bedrock:amazon.titan-text-express-v1\n",
+ "* bedrock:ai21.j2-ultra-v1\n",
+ "* bedrock:ai21.j2-mid-v1\n",
+ "* bedrock:cohere.command-light-text-v14\n",
+ "* bedrock:cohere.command-text-v14\n",
+ "* bedrock:meta.llama2-13b-chat-v1\n",
+ "* bedrock:meta.llama2-70b-chat-v1\n",
+ "\n",
+ "bedrock-chat\n",
+ "* bedrock-chat:anthropic.claude-v1\n",
+ "* bedrock-chat:anthropic.claude-v2\n",
+ "* bedrock-chat:anthropic.claude-v2:1\n",
+ "* bedrock-chat:anthropic.claude-instant-v1\n",
+ "\n",
"anthropic\n",
- "Requires environment variable ANTHROPIC_API_KEY (set)\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
"* anthropic:claude-v1\n",
"* anthropic:claude-v1.0\n",
"* anthropic:claude-v1.2\n",
+ "* anthropic:claude-2\n",
+ "* anthropic:claude-2.0\n",
"* anthropic:claude-instant-v1\n",
"* anthropic:claude-instant-v1.0\n",
+ "* anthropic:claude-instant-v1.2\n",
+ "\n",
+ "anthropic-chat\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
+ "* anthropic-chat:claude-v1\n",
+ "* anthropic-chat:claude-v1.0\n",
+ "* anthropic-chat:claude-v1.2\n",
+ "* anthropic-chat:claude-2\n",
+ "* anthropic-chat:claude-2.0\n",
+ "* anthropic-chat:claude-instant-v1\n",
+ "* anthropic-chat:claude-instant-v1.0\n",
+ "* anthropic-chat:claude-instant-v1.2\n",
+ "\n",
+ "azure-chat-openai\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* This provider does not define a list of models.\n",
"\n",
"cohere\n",
- "Requires environment variable COHERE_API_KEY (set)\n",
- "* cohere:medium\n",
- "* cohere:xlarge\n",
+ "Requires environment variable: COHERE_API_KEY (set)\n",
+ "* cohere:command\n",
+ "* cohere:command-nightly\n",
+ "* cohere:command-light\n",
+ "* cohere:command-light-nightly\n",
+ "\n",
+ "gpt4all\n",
+ "* gpt4all:ggml-gpt4all-j-v1.2-jazzy\n",
+ "* gpt4all:ggml-gpt4all-j-v1.3-groovy\n",
+ "* gpt4all:ggml-gpt4all-l13b-snoozy\n",
+ "* gpt4all:mistral-7b-openorca.Q4_0\n",
+ "* gpt4all:mistral-7b-instruct-v0.1.Q4_0\n",
+ "* gpt4all:gpt4all-falcon-q4_0\n",
+ "* gpt4all:wizardlm-13b-v1.2.Q4_0\n",
+ "* gpt4all:nous-hermes-llama2-13b.Q4_0\n",
+ "* gpt4all:gpt4all-13b-snoozy-q4_0\n",
+ "* gpt4all:mpt-7b-chat-merges-q4_0\n",
+ "* gpt4all:orca-mini-3b-gguf2-q4_0\n",
+ "* gpt4all:starcoder-q4_0\n",
+ "* gpt4all:rift-coder-v0-7b-q4_0\n",
+ "* gpt4all:em_german_mistral_v01.Q4_0\n",
"\n",
"huggingface_hub\n",
- "Requires environment variable HUGGINGFACEHUB_API_TOKEN (set)\n",
- "* See https://huggingface.co/models for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
+ "Requires environment variable: HUGGINGFACEHUB_API_TOKEN (set)\n",
+ "* See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
"\n",
"openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai:text-davinci-003\n",
- "* openai:text-davinci-002\n",
- "* openai:text-curie-001\n",
- "* openai:text-babbage-001\n",
- "* openai:text-ada-001\n",
- "* openai:davinci\n",
- "* openai:curie\n",
- "* openai:babbage\n",
- "* openai:ada\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* openai:babbage-002\n",
+ "* openai:davinci-002\n",
+ "* openai:gpt-3.5-turbo-instruct\n",
"\n",
"openai-chat\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai-chat:gpt-4\n",
- "* openai-chat:gpt-4-0314\n",
- "* openai-chat:gpt-4-32k\n",
- "* openai-chat:gpt-4-32k-0314\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
+ "* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-16k-0613\n",
+ "* openai-chat:gpt-4\n",
+ "* openai-chat:gpt-4-0613\n",
+ "* openai-chat:gpt-4-32k\n",
+ "* openai-chat:gpt-4-32k-0613\n",
+ "* openai-chat:gpt-4-1106-preview\n",
+ "\n",
+ "qianfan\n",
+ "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "* qianfan:ERNIE-Bot\n",
+ "* qianfan:ERNIE-Bot-4\n",
"\n",
"sagemaker-endpoint\n",
- "* Specify an endpoint name as the model ID. In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
+ "* Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
"\n",
"\n",
"Aliases and custom commands:\n",
"gpt2 - huggingface_hub:gpt2\n",
- "gpt3 - openai:text-davinci-003\n",
+ "gpt3 - openai:davinci-002\n",
"chatgpt - openai-chat:gpt-3.5-turbo\n",
"gpt4 - openai-chat:gpt-4\n",
- "mychat - openai:text-davinci-003\n"
+ "ernie-bot - qianfan:ERNIE-Bot\n",
+ "ernie-bot-4 - qianfan:ERNIE-Bot-4\n",
+ "titan - bedrock:amazon.titan-tg1-large\n",
+ "mychat - openai:babbage-002\n"
]
},
"execution_count": 11,
@@ -689,25 +848,34 @@
"| Provider | Environment variable | Set? | Models |\n",
"|----------|----------------------|------|--------|\n",
"| `ai21` | `AI21_API_KEY` | ✅ | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n",
- "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0` |\n",
- "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:medium`, `cohere:xlarge` |\n",
- "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See https://huggingface.co/models for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
- "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301` |\n",
- "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
+ "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n",
+ "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n",
+ "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n",
+ "| `anthropic-chat` | `ANTHROPIC_API_KEY` | ✅ | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n",
+ "| `azure-chat-openai` | `OPENAI_API_KEY` | ✅ | This provider does not define a list of models. |\n",
+ "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n",
+ "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
+ "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
+ "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
"\n",
"| Name | Target |\n",
"|------|--------|\n",
"| `gpt2` | `huggingface_hub:gpt2` |\n",
- "| `gpt3` | `openai:text-davinci-003` |\n",
+ "| `gpt3` | `openai:davinci-002` |\n",
"| `chatgpt` | `openai-chat:gpt-3.5-turbo` |\n",
- "| `gpt4` | `openai-chat:gpt-4` |\n"
+ "| `gpt4` | `openai-chat:gpt-4` |\n",
+ "| `ernie-bot` | `qianfan:ERNIE-Bot` |\n",
+ "| `ernie-bot-4` | `qianfan:ERNIE-Bot-4` |\n",
+ "| `titan` | `bedrock:amazon.titan-tg1-large` |\n"
],
"text/plain": [
"ai21\n",
- "Requires environment variable AI21_API_KEY (set)\n",
+ "Requires environment variable: AI21_API_KEY (set)\n",
"* ai21:j1-large\n",
"* ai21:j1-grande\n",
"* ai21:j1-jumbo\n",
@@ -718,53 +886,111 @@
"* ai21:j2-grande-instruct\n",
"* ai21:j2-jumbo-instruct\n",
"\n",
+ "bedrock\n",
+ "* bedrock:amazon.titan-text-express-v1\n",
+ "* bedrock:ai21.j2-ultra-v1\n",
+ "* bedrock:ai21.j2-mid-v1\n",
+ "* bedrock:cohere.command-light-text-v14\n",
+ "* bedrock:cohere.command-text-v14\n",
+ "* bedrock:meta.llama2-13b-chat-v1\n",
+ "* bedrock:meta.llama2-70b-chat-v1\n",
+ "\n",
+ "bedrock-chat\n",
+ "* bedrock-chat:anthropic.claude-v1\n",
+ "* bedrock-chat:anthropic.claude-v2\n",
+ "* bedrock-chat:anthropic.claude-v2:1\n",
+ "* bedrock-chat:anthropic.claude-instant-v1\n",
+ "\n",
"anthropic\n",
- "Requires environment variable ANTHROPIC_API_KEY (set)\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
"* anthropic:claude-v1\n",
"* anthropic:claude-v1.0\n",
"* anthropic:claude-v1.2\n",
+ "* anthropic:claude-2\n",
+ "* anthropic:claude-2.0\n",
"* anthropic:claude-instant-v1\n",
"* anthropic:claude-instant-v1.0\n",
+ "* anthropic:claude-instant-v1.2\n",
+ "\n",
+ "anthropic-chat\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
+ "* anthropic-chat:claude-v1\n",
+ "* anthropic-chat:claude-v1.0\n",
+ "* anthropic-chat:claude-v1.2\n",
+ "* anthropic-chat:claude-2\n",
+ "* anthropic-chat:claude-2.0\n",
+ "* anthropic-chat:claude-instant-v1\n",
+ "* anthropic-chat:claude-instant-v1.0\n",
+ "* anthropic-chat:claude-instant-v1.2\n",
+ "\n",
+ "azure-chat-openai\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* This provider does not define a list of models.\n",
"\n",
"cohere\n",
- "Requires environment variable COHERE_API_KEY (set)\n",
- "* cohere:medium\n",
- "* cohere:xlarge\n",
+ "Requires environment variable: COHERE_API_KEY (set)\n",
+ "* cohere:command\n",
+ "* cohere:command-nightly\n",
+ "* cohere:command-light\n",
+ "* cohere:command-light-nightly\n",
+ "\n",
+ "gpt4all\n",
+ "* gpt4all:ggml-gpt4all-j-v1.2-jazzy\n",
+ "* gpt4all:ggml-gpt4all-j-v1.3-groovy\n",
+ "* gpt4all:ggml-gpt4all-l13b-snoozy\n",
+ "* gpt4all:mistral-7b-openorca.Q4_0\n",
+ "* gpt4all:mistral-7b-instruct-v0.1.Q4_0\n",
+ "* gpt4all:gpt4all-falcon-q4_0\n",
+ "* gpt4all:wizardlm-13b-v1.2.Q4_0\n",
+ "* gpt4all:nous-hermes-llama2-13b.Q4_0\n",
+ "* gpt4all:gpt4all-13b-snoozy-q4_0\n",
+ "* gpt4all:mpt-7b-chat-merges-q4_0\n",
+ "* gpt4all:orca-mini-3b-gguf2-q4_0\n",
+ "* gpt4all:starcoder-q4_0\n",
+ "* gpt4all:rift-coder-v0-7b-q4_0\n",
+ "* gpt4all:em_german_mistral_v01.Q4_0\n",
"\n",
"huggingface_hub\n",
- "Requires environment variable HUGGINGFACEHUB_API_TOKEN (set)\n",
- "* See https://huggingface.co/models for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
+ "Requires environment variable: HUGGINGFACEHUB_API_TOKEN (set)\n",
+ "* See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
"\n",
"openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai:text-davinci-003\n",
- "* openai:text-davinci-002\n",
- "* openai:text-curie-001\n",
- "* openai:text-babbage-001\n",
- "* openai:text-ada-001\n",
- "* openai:davinci\n",
- "* openai:curie\n",
- "* openai:babbage\n",
- "* openai:ada\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* openai:babbage-002\n",
+ "* openai:davinci-002\n",
+ "* openai:gpt-3.5-turbo-instruct\n",
"\n",
"openai-chat\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai-chat:gpt-4\n",
- "* openai-chat:gpt-4-0314\n",
- "* openai-chat:gpt-4-32k\n",
- "* openai-chat:gpt-4-32k-0314\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
+ "* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-16k-0613\n",
+ "* openai-chat:gpt-4\n",
+ "* openai-chat:gpt-4-0613\n",
+ "* openai-chat:gpt-4-32k\n",
+ "* openai-chat:gpt-4-32k-0613\n",
+ "* openai-chat:gpt-4-1106-preview\n",
+ "\n",
+ "qianfan\n",
+ "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "* qianfan:ERNIE-Bot\n",
+ "* qianfan:ERNIE-Bot-4\n",
"\n",
"sagemaker-endpoint\n",
- "* Specify an endpoint name as the model ID. In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
+ "* Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
"\n",
"\n",
"Aliases and custom commands:\n",
"gpt2 - huggingface_hub:gpt2\n",
- "gpt3 - openai:text-davinci-003\n",
+ "gpt3 - openai:davinci-002\n",
"chatgpt - openai-chat:gpt-3.5-turbo\n",
- "gpt4 - openai-chat:gpt-4\n"
+ "gpt4 - openai-chat:gpt-4\n",
+ "ernie-bot - qianfan:ERNIE-Bot\n",
+ "ernie-bot-4 - qianfan:ERNIE-Bot-4\n",
+ "titan - bedrock:amazon.titan-tg1-large\n"
]
},
"execution_count": 13,
@@ -797,12 +1023,12 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import Cohere\n",
"\n",
- "llm = OpenAI(temperature=0.9)\n",
+ "llm = Cohere(model=\"command\", max_tokens=256, temperature=0.75)\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"product\"],\n",
- " template=\"What is a good name for a company that makes {product}?\",\n",
+ " template=\"What is a good name for a company that makes {product}? Provide only one name. Do not provide any other text than the name. Do not provide other info\",\n",
")\n",
"chain = LLMChain(llm=llm, prompt=prompt)"
]
@@ -810,19 +1036,6 @@
{
"cell_type": "code",
"execution_count": 15,
- "id": "29d5239f-7601-405e-b059-4e881ebf7ab1",
- "metadata": {
- "tags": []
- },
- "outputs": [],
- "source": [
- "from langchain.chains import LLMChain\n",
- "chain = LLMChain(llm=llm, prompt=prompt)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
"id": "43e7a77c-93af-4ef7-a104-f932c9f54183",
"metadata": {
"tags": []
@@ -832,20 +1045,18 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\n",
- "\n",
- "Bright Toes Socks.\n"
+ "{'product': 'colorful socks', 'text': ' Chroma Sox'}\n"
]
}
],
"source": [
"# Run the chain only specifying the input variable.\n",
- "print(chain.run(\"colorful socks\"))"
+ "print(chain.invoke(\"colorful socks\"))"
]
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 16,
"id": "9badc567-9720-4e33-ab4a-54fda5129f36",
"metadata": {
"tags": []
@@ -860,7 +1071,7 @@
"Registered new alias `company`"
]
},
- "execution_count": 17,
+ "execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -871,7 +1082,7 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 17,
"id": "92b75d71-8844-4872-b424-b0023706abb1",
"metadata": {
"tags": []
@@ -883,26 +1094,35 @@
"| Provider | Environment variable | Set? | Models |\n",
"|----------|----------------------|------|--------|\n",
"| `ai21` | `AI21_API_KEY` | ✅ | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n",
- "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0` |\n",
- "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:medium`, `cohere:xlarge` |\n",
- "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See https://huggingface.co/models for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
- "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301` |\n",
- "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
+ "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n",
+ "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n",
+ "| `anthropic` | `ANTHROPIC_API_KEY` | ✅ | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n",
+ "| `anthropic-chat` | `ANTHROPIC_API_KEY` | ✅ | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n",
+ "| `azure-chat-openai` | `OPENAI_API_KEY` | ✅ | This provider does not define a list of models. |\n",
+ "| `cohere` | `COHERE_API_KEY` | ✅ | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n",
+ "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
+ "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
+ "| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
"\n",
"| Name | Target |\n",
"|------|--------|\n",
"| `gpt2` | `huggingface_hub:gpt2` |\n",
- "| `gpt3` | `openai:text-davinci-003` |\n",
+ "| `gpt3` | `openai:davinci-002` |\n",
"| `chatgpt` | `openai-chat:gpt-3.5-turbo` |\n",
"| `gpt4` | `openai-chat:gpt-4` |\n",
+ "| `ernie-bot` | `qianfan:ERNIE-Bot` |\n",
+ "| `ernie-bot-4` | `qianfan:ERNIE-Bot-4` |\n",
+ "| `titan` | `bedrock:amazon.titan-tg1-large` |\n",
"| `company` | *custom chain* |\n"
],
"text/plain": [
"ai21\n",
- "Requires environment variable AI21_API_KEY (set)\n",
+ "Requires environment variable: AI21_API_KEY (set)\n",
"* ai21:j1-large\n",
"* ai21:j1-grande\n",
"* ai21:j1-jumbo\n",
@@ -913,57 +1133,115 @@
"* ai21:j2-grande-instruct\n",
"* ai21:j2-jumbo-instruct\n",
"\n",
+ "bedrock\n",
+ "* bedrock:amazon.titan-text-express-v1\n",
+ "* bedrock:ai21.j2-ultra-v1\n",
+ "* bedrock:ai21.j2-mid-v1\n",
+ "* bedrock:cohere.command-light-text-v14\n",
+ "* bedrock:cohere.command-text-v14\n",
+ "* bedrock:meta.llama2-13b-chat-v1\n",
+ "* bedrock:meta.llama2-70b-chat-v1\n",
+ "\n",
+ "bedrock-chat\n",
+ "* bedrock-chat:anthropic.claude-v1\n",
+ "* bedrock-chat:anthropic.claude-v2\n",
+ "* bedrock-chat:anthropic.claude-v2:1\n",
+ "* bedrock-chat:anthropic.claude-instant-v1\n",
+ "\n",
"anthropic\n",
- "Requires environment variable ANTHROPIC_API_KEY (set)\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
"* anthropic:claude-v1\n",
"* anthropic:claude-v1.0\n",
"* anthropic:claude-v1.2\n",
+ "* anthropic:claude-2\n",
+ "* anthropic:claude-2.0\n",
"* anthropic:claude-instant-v1\n",
"* anthropic:claude-instant-v1.0\n",
+ "* anthropic:claude-instant-v1.2\n",
+ "\n",
+ "anthropic-chat\n",
+ "Requires environment variable: ANTHROPIC_API_KEY (set)\n",
+ "* anthropic-chat:claude-v1\n",
+ "* anthropic-chat:claude-v1.0\n",
+ "* anthropic-chat:claude-v1.2\n",
+ "* anthropic-chat:claude-2\n",
+ "* anthropic-chat:claude-2.0\n",
+ "* anthropic-chat:claude-instant-v1\n",
+ "* anthropic-chat:claude-instant-v1.0\n",
+ "* anthropic-chat:claude-instant-v1.2\n",
+ "\n",
+ "azure-chat-openai\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* This provider does not define a list of models.\n",
"\n",
"cohere\n",
- "Requires environment variable COHERE_API_KEY (set)\n",
- "* cohere:medium\n",
- "* cohere:xlarge\n",
+ "Requires environment variable: COHERE_API_KEY (set)\n",
+ "* cohere:command\n",
+ "* cohere:command-nightly\n",
+ "* cohere:command-light\n",
+ "* cohere:command-light-nightly\n",
+ "\n",
+ "gpt4all\n",
+ "* gpt4all:ggml-gpt4all-j-v1.2-jazzy\n",
+ "* gpt4all:ggml-gpt4all-j-v1.3-groovy\n",
+ "* gpt4all:ggml-gpt4all-l13b-snoozy\n",
+ "* gpt4all:mistral-7b-openorca.Q4_0\n",
+ "* gpt4all:mistral-7b-instruct-v0.1.Q4_0\n",
+ "* gpt4all:gpt4all-falcon-q4_0\n",
+ "* gpt4all:wizardlm-13b-v1.2.Q4_0\n",
+ "* gpt4all:nous-hermes-llama2-13b.Q4_0\n",
+ "* gpt4all:gpt4all-13b-snoozy-q4_0\n",
+ "* gpt4all:mpt-7b-chat-merges-q4_0\n",
+ "* gpt4all:orca-mini-3b-gguf2-q4_0\n",
+ "* gpt4all:starcoder-q4_0\n",
+ "* gpt4all:rift-coder-v0-7b-q4_0\n",
+ "* gpt4all:em_german_mistral_v01.Q4_0\n",
"\n",
"huggingface_hub\n",
- "Requires environment variable HUGGINGFACEHUB_API_TOKEN (set)\n",
- "* See https://huggingface.co/models for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
+ "Requires environment variable: HUGGINGFACEHUB_API_TOKEN (set)\n",
+ "* See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`.\n",
"\n",
"openai\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai:text-davinci-003\n",
- "* openai:text-davinci-002\n",
- "* openai:text-curie-001\n",
- "* openai:text-babbage-001\n",
- "* openai:text-ada-001\n",
- "* openai:davinci\n",
- "* openai:curie\n",
- "* openai:babbage\n",
- "* openai:ada\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
+ "* openai:babbage-002\n",
+ "* openai:davinci-002\n",
+ "* openai:gpt-3.5-turbo-instruct\n",
"\n",
"openai-chat\n",
- "Requires environment variable OPENAI_API_KEY (set)\n",
- "* openai-chat:gpt-4\n",
- "* openai-chat:gpt-4-0314\n",
- "* openai-chat:gpt-4-32k\n",
- "* openai-chat:gpt-4-32k-0314\n",
+ "Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
+ "* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-16k-0613\n",
+ "* openai-chat:gpt-4\n",
+ "* openai-chat:gpt-4-0613\n",
+ "* openai-chat:gpt-4-32k\n",
+ "* openai-chat:gpt-4-32k-0613\n",
+ "* openai-chat:gpt-4-1106-preview\n",
+ "\n",
+ "qianfan\n",
+ "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "* qianfan:ERNIE-Bot\n",
+ "* qianfan:ERNIE-Bot-4\n",
"\n",
"sagemaker-endpoint\n",
- "* Specify an endpoint name as the model ID. In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
+ "* Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints).\n",
"\n",
"\n",
"Aliases and custom commands:\n",
"gpt2 - huggingface_hub:gpt2\n",
- "gpt3 - openai:text-davinci-003\n",
+ "gpt3 - openai:davinci-002\n",
"chatgpt - openai-chat:gpt-3.5-turbo\n",
"gpt4 - openai-chat:gpt-4\n",
+ "ernie-bot - qianfan:ERNIE-Bot\n",
+ "ernie-bot-4 - qianfan:ERNIE-Bot-4\n",
+ "titan - bedrock:amazon.titan-tg1-large\n",
"company - custom chain\n"
]
},
- "execution_count": 18,
+ "execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -974,22 +1252,28 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 18,
"id": "cfef0fee-a7c6-49e4-8d90-9aa12f7b91d1",
"metadata": {},
"outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/opt/miniconda3/envs/jupyter-ai-jl4/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `run` was deprecated in LangChain 0.1.0 and will be removed in 0.2.0. Use invoke instead.\n",
+ " warn_deprecated(\n"
+ ]
+ },
{
"data": {
"text/markdown": [
- "\n",
- "\n",
- "**Brightsocks**"
+ " Chroma Socks "
],
"text/plain": [
""
]
},
- "execution_count": 19,
+ "execution_count": 18,
"metadata": {
"text/markdown": {
"jupyter_ai": {
@@ -1007,19 +1291,17 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": 19,
"id": "06c698e7-e2cf-41b5-88de-2be4d3b60eba",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "\n",
- "\n",
- "FunkySox."
+ " Chroma Socks "
]
},
- "execution_count": 20,
+ "execution_count": 19,
"metadata": {
"jupyter_ai": {
"custom_chain_id": "company"
diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/aliases.py b/packages/jupyter-ai-magics/jupyter_ai_magics/aliases.py
index 96cac4efe..f34826428 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/aliases.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/aliases.py
@@ -1,6 +1,6 @@
MODEL_ID_ALIASES = {
"gpt2": "huggingface_hub:gpt2",
- "gpt3": "openai:text-davinci-003",
+ "gpt3": "openai:davinci-002",
"chatgpt": "openai-chat:gpt-3.5-turbo",
"gpt4": "openai-chat:gpt-4",
"ernie-bot": "qianfan:ERNIE-Bot",
diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py b/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
index 21e766c3b..5762f2560 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
@@ -536,18 +536,7 @@ async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
class OpenAIProvider(BaseProvider, OpenAI):
id = "openai"
name = "OpenAI"
- models = [
- "text-davinci-003",
- "text-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- "gpt-3.5-turbo-instruct",
- "davinci",
- "curie",
- "babbage",
- "ada",
- ]
+ models = ["babbage-002", "davinci-002", "gpt-3.5-turbo-instruct"]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
@@ -570,15 +559,14 @@ class ChatOpenAIProvider(BaseProvider, ChatOpenAI):
name = "OpenAI"
models = [
"gpt-3.5-turbo",
+ "gpt-3.5-turbo-0301", # Deprecated as of 2024-06-13
+ "gpt-3.5-turbo-0613", # Deprecated as of 2024-06-13
+ "gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-16k-0613",
+ "gpt-3.5-turbo-16k-0613", # Deprecated as of 2024-06-13
"gpt-4",
- "gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
- "gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-1106-preview",
]
diff --git a/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.labextension_name}}/{{cookiecutter.python_name}}/engine.py b/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.labextension_name}}/{{cookiecutter.python_name}}/engine.py
index c32e86148..63066ef07 100644
--- a/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.labextension_name}}/{{cookiecutter.python_name}}/engine.py
+++ b/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.labextension_name}}/{{cookiecutter.python_name}}/engine.py
@@ -29,7 +29,7 @@ async def execute(
# prompt = task.prompt_template.format(**prompt_variables)
# openai.api_key = self.api_key
# response = openai.Completion.create(
- # model="text-davinci-003",
+ # model="davinci-002",
# prompt=prompt,
# ...
# )
From ca03e2c9a846344c379d50ccbe8323f7e0c7b1ed Mon Sep 17 00:00:00 2001
From: Garson R Chow <58149459+garsonbyte@users.noreply.github.com>
Date: Tue, 23 Jan 2024 18:20:25 -0500
Subject: [PATCH 2/5] Reflect theme changes without a refresh (#575)
* Reflect theme changes without a refresh
By leveraging `IThemeManager.themeChanged`, we can listen on theme change signals
and rebuild the theme object in response. This allows CSS variable changes to reflect
in the MUI theme without having to refresh the page.
* update yarn.lock
* pass themeManager as a prop instead of using top-level scope
* remove theme hack added in #192
---------
Co-authored-by: gchow
Co-authored-by: David L. Qiu
---
packages/jupyter-ai/src/components/chat.tsx | 4 +++-
.../src/components/jl-theme-provider.tsx | 6 +++++-
packages/jupyter-ai/src/index.ts | 16 +++++++++++-----
packages/jupyter-ai/src/theme-provider.ts | 8 +-------
packages/jupyter-ai/src/widgets/chat-error.tsx | 9 ++++++---
packages/jupyter-ai/src/widgets/chat-sidebar.tsx | 5 ++++-
yarn.lock | 4 ++--
7 files changed, 32 insertions(+), 20 deletions(-)
diff --git a/packages/jupyter-ai/src/components/chat.tsx b/packages/jupyter-ai/src/components/chat.tsx
index ded339c70..53ba45f1a 100644
--- a/packages/jupyter-ai/src/components/chat.tsx
+++ b/packages/jupyter-ai/src/components/chat.tsx
@@ -4,6 +4,7 @@ import { Button, IconButton, Stack } from '@mui/material';
import SettingsIcon from '@mui/icons-material/Settings';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import type { Awareness } from 'y-protocols/awareness';
+import type { IThemeManager } from '@jupyterlab/apputils';
import { JlThemeProvider } from './jl-theme-provider';
import { ChatMessages } from './chat-messages';
@@ -178,6 +179,7 @@ export type ChatProps = {
selectionWatcher: SelectionWatcher;
chatHandler: ChatHandler;
globalAwareness: Awareness | null;
+ themeManager: IThemeManager | null;
chatView?: ChatView;
};
@@ -190,7 +192,7 @@ export function Chat(props: ChatProps): JSX.Element {
const [view, setView] = useState(props.chatView || ChatView.Chat);
return (
-
+
(createTheme());
@@ -12,7 +14,9 @@ export function JlThemeProvider(props: {
async function setJlTheme() {
setTheme(await getJupyterLabTheme());
}
+
setJlTheme();
+ props.themeManager?.themeChanged.connect(setJlTheme);
}, []);
return {props.children};
diff --git a/packages/jupyter-ai/src/index.ts b/packages/jupyter-ai/src/index.ts
index f6832f878..807629eae 100644
--- a/packages/jupyter-ai/src/index.ts
+++ b/packages/jupyter-ai/src/index.ts
@@ -4,7 +4,11 @@ import {
ILayoutRestorer
} from '@jupyterlab/application';
-import { IWidgetTracker, ReactWidget } from '@jupyterlab/apputils';
+import {
+ IWidgetTracker,
+ ReactWidget,
+ IThemeManager
+} from '@jupyterlab/apputils';
import { IDocumentWidget } from '@jupyterlab/docregistry';
import { IGlobalAwareness } from '@jupyter/collaboration';
import type { Awareness } from 'y-protocols/awareness';
@@ -23,11 +27,12 @@ export type DocumentTracker = IWidgetTracker;
const plugin: JupyterFrontEndPlugin = {
id: 'jupyter_ai:plugin',
autoStart: true,
- optional: [IGlobalAwareness, ILayoutRestorer],
+ optional: [IGlobalAwareness, ILayoutRestorer, IThemeManager],
activate: async (
app: JupyterFrontEnd,
globalAwareness: Awareness | null,
- restorer: ILayoutRestorer | null
+ restorer: ILayoutRestorer | null,
+ themeManager: IThemeManager | null
) => {
/**
* Initialize selection watcher singleton
@@ -45,10 +50,11 @@ const plugin: JupyterFrontEndPlugin = {
chatWidget = buildChatSidebar(
selectionWatcher,
chatHandler,
- globalAwareness
+ globalAwareness,
+ themeManager
);
} catch (e) {
- chatWidget = buildErrorWidget();
+ chatWidget = buildErrorWidget(themeManager);
}
/**
diff --git a/packages/jupyter-ai/src/theme-provider.ts b/packages/jupyter-ai/src/theme-provider.ts
index 405f08198..02db8d369 100644
--- a/packages/jupyter-ai/src/theme-provider.ts
+++ b/packages/jupyter-ai/src/theme-provider.ts
@@ -13,7 +13,6 @@ export async function pollUntilReady(): Promise {
export async function getJupyterLabTheme(): Promise {
await pollUntilReady();
const light = document.body.getAttribute('data-jp-theme-light');
- const primaryFontColor = getCSSVariable('--jp-ui-font-color1');
return createTheme({
spacing: 4,
components: {
@@ -113,7 +112,7 @@ export async function getJupyterLabTheme(): Promise {
dark: getCSSVariable('--jp-success-color0')
},
text: {
- primary: primaryFontColor,
+ primary: getCSSVariable('--jp-ui-font-color1'),
secondary: getCSSVariable('--jp-ui-font-color2'),
disabled: getCSSVariable('--jp-ui-font-color3')
}
@@ -127,11 +126,6 @@ export async function getJupyterLabTheme(): Promise {
htmlFontSize: 16,
button: {
textTransform: 'capitalize'
- },
- // this is undocumented as of the time of writing.
- // https://stackoverflow.com/a/62950304/12548458
- allVariants: {
- color: primaryFontColor
}
}
});
diff --git a/packages/jupyter-ai/src/widgets/chat-error.tsx b/packages/jupyter-ai/src/widgets/chat-error.tsx
index 3b8f8ef95..8ae9cbb44 100644
--- a/packages/jupyter-ai/src/widgets/chat-error.tsx
+++ b/packages/jupyter-ai/src/widgets/chat-error.tsx
@@ -1,13 +1,16 @@
import React from 'react';
import { ReactWidget } from '@jupyterlab/apputils';
+import type { IThemeManager } from '@jupyterlab/apputils';
+import { Alert, Box } from '@mui/material';
import { chatIcon } from '../icons';
-import { Alert, Box } from '@mui/material';
import { JlThemeProvider } from '../components/jl-theme-provider';
-export function buildErrorWidget(): ReactWidget {
+export function buildErrorWidget(
+ themeManager: IThemeManager | null
+): ReactWidget {
const ErrorWidget = ReactWidget.create(
-
+
);
ChatWidget.id = 'jupyter-ai::chat';
diff --git a/yarn.lock b/yarn.lock
index ddc039416..0df949cb2 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -14822,11 +14822,11 @@ __metadata:
"typescript@patch:typescript@^3 || ^4#~builtin, typescript@patch:typescript@~4.9.0#~builtin":
version: 4.9.5
- resolution: "typescript@patch:typescript@npm%3A4.9.5#~builtin::version=4.9.5&hash=23ec76"
+ resolution: "typescript@patch:typescript@npm%3A4.9.5#~builtin::version=4.9.5&hash=289587"
bin:
tsc: bin/tsc
tsserver: bin/tsserver
- checksum: ab417a2f398380c90a6cf5a5f74badd17866adf57f1165617d6a551f059c3ba0a3e4da0d147b3ac5681db9ac76a303c5876394b13b3de75fdd5b1eaa06181c9d
+ checksum: 1f8f3b6aaea19f0f67cba79057674ba580438a7db55057eb89cc06950483c5d632115c14077f6663ea76fd09fce3c190e6414bb98582ec80aa5a4eaf345d5b68
languageName: node
linkType: hard
From cde6d35dc7e522b25e5c9693fe657b71ea6603a5 Mon Sep 17 00:00:00 2001
From: SALES <5235127+adriens@users.noreply.github.com>
Date: Thu, 25 Jan 2024 07:07:58 +1100
Subject: [PATCH 3/5] Add Kaggle to supported platforms (#577)
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 5943bb23e..94e7af17b 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ and powerful way to explore generative AI models in notebooks and improve your p
in JupyterLab and the Jupyter Notebook. More specifically, Jupyter AI offers:
* An `%%ai` magic that turns the Jupyter notebook into a reproducible generative AI playground.
- This works anywhere the IPython kernel runs (JupyterLab, Jupyter Notebook, Google Colab, VSCode, etc.).
+ This works anywhere the IPython kernel runs (JupyterLab, Jupyter Notebook, Google Colab, Kaggle, VSCode, etc.).
* A native chat UI in JupyterLab that enables you to work with generative AI as a conversational assistant.
* Support for a wide range of generative model providers, including AI21, Anthropic, AWS, Cohere,
Hugging Face, and OpenAI.
From 9907cc62b79738c4025c5949a576d8954613a67a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Krassowski?=
<5832902+krassowski@users.noreply.github.com>
Date: Thu, 25 Jan 2024 14:19:26 +0000
Subject: [PATCH 4/5] Expose templates for customisation in providers (#581)
* Expose chat template for customisation in providers
* Enable completion template customization, including suffix
* Remove `inline` from "inline completion" as per review request
---
.../jupyter_ai_magics/providers.py | 100 +++++++++++++++++-
.../jupyter_ai/chat_handlers/default.py | 48 +--------
.../completions/handlers/default.py | 43 +-------
3 files changed, 105 insertions(+), 86 deletions(-)
diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py b/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
index 5762f2560..850e24a99 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
@@ -11,7 +11,13 @@
from langchain.chat_models.base import BaseChatModel
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.llms.utils import enforce_stop_tokens
-from langchain.prompts import PromptTemplate
+from langchain.prompts import (
+ ChatPromptTemplate,
+ HumanMessagePromptTemplate,
+ MessagesPlaceholder,
+ PromptTemplate,
+ SystemMessagePromptTemplate,
+)
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema import LLMResult
from langchain.utils import get_from_dict_or_env
@@ -42,6 +48,49 @@
from pydantic.main import ModelMetaclass
+CHAT_SYSTEM_PROMPT = """
+You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
+You are not a language model, but rather an application built on a foundation model from {provider_name} called {local_model_id}.
+You are talkative and you provide lots of specific details from the foundation model's context.
+You may use Markdown to format your response.
+Code blocks must be formatted in Markdown.
+Math should be rendered with inline TeX markup, surrounded by $.
+If you do not know the answer to a question, answer truthfully by responding that you do not know.
+The following is a friendly conversation between you and a human.
+""".strip()
+
+CHAT_DEFAULT_TEMPLATE = """Current conversation:
+{history}
+Human: {input}
+AI:"""
+
+
+COMPLETION_SYSTEM_PROMPT = """
+You are an application built to provide helpful code completion suggestions.
+You should only produce code. Keep comments to minimum, use the
+programming language comment syntax. Produce clean code.
+The code is written in JupyterLab, a data analysis and code development
+environment which can execute code extended with additional syntax for
+interactive features, such as magics.
+""".strip()
+
+# only add the suffix bit if present to save input tokens/computation time
+COMPLETION_DEFAULT_TEMPLATE = """
+The document is called `{{filename}}` and written in {{language}}.
+{% if suffix %}
+The code after the completion request is:
+
+```
+{{suffix}}
+```
+{% endif %}
+
+Complete the following code:
+
+```
+{{prefix}}"""
+
+
class EnvAuthStrategy(BaseModel):
"""Require one auth token via an environment variable."""
@@ -265,6 +314,55 @@ def get_prompt_template(self, format) -> PromptTemplate:
else:
return self.prompt_templates["text"] # Default to plain format
+ def get_chat_prompt_template(self) -> PromptTemplate:
+ """
+ Produce a prompt template optimised for chat conversation.
+ The template should take two variables: history and input.
+ """
+ name = self.__class__.name
+ if self.is_chat_provider:
+ return ChatPromptTemplate.from_messages(
+ [
+ SystemMessagePromptTemplate.from_template(
+ CHAT_SYSTEM_PROMPT
+ ).format(provider_name=name, local_model_id=self.model_id),
+ MessagesPlaceholder(variable_name="history"),
+ HumanMessagePromptTemplate.from_template("{input}"),
+ ]
+ )
+ else:
+ return PromptTemplate(
+ input_variables=["history", "input"],
+ template=CHAT_SYSTEM_PROMPT.format(
+ provider_name=name, local_model_id=self.model_id
+ )
+ + "\n\n"
+ + CHAT_DEFAULT_TEMPLATE,
+ )
+
+ def get_completion_prompt_template(self) -> PromptTemplate:
+ """
+ Produce a prompt template optimised for inline code or text completion.
+ The template should take variables: prefix, suffix, language, filename.
+ """
+ if self.is_chat_provider:
+ return ChatPromptTemplate.from_messages(
+ [
+ SystemMessagePromptTemplate.from_template(COMPLETION_SYSTEM_PROMPT),
+ HumanMessagePromptTemplate.from_template(
+ COMPLETION_DEFAULT_TEMPLATE, template_format="jinja2"
+ ),
+ ]
+ )
+ else:
+ return PromptTemplate(
+ input_variables=["prefix", "suffix", "language", "filename"],
+ template=COMPLETION_SYSTEM_PROMPT
+ + "\n\n"
+ + COMPLETION_DEFAULT_TEMPLATE,
+ template_format="jinja2",
+ )
+
@property
def is_chat_provider(self):
return isinstance(self, BaseChatModel)
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
index 0db83afdd..584f0b33f 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
@@ -4,32 +4,9 @@
from jupyter_ai_magics.providers import BaseProvider
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferWindowMemory
-from langchain.prompts import (
- ChatPromptTemplate,
- HumanMessagePromptTemplate,
- MessagesPlaceholder,
- PromptTemplate,
- SystemMessagePromptTemplate,
-)
from .base import BaseChatHandler, SlashCommandRoutingType
-SYSTEM_PROMPT = """
-You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
-You are not a language model, but rather an application built on a foundation model from {provider_name} called {local_model_id}.
-You are talkative and you provide lots of specific details from the foundation model's context.
-You may use Markdown to format your response.
-Code blocks must be formatted in Markdown.
-Math should be rendered with inline TeX markup, surrounded by $.
-If you do not know the answer to a question, answer truthfully by responding that you do not know.
-The following is a friendly conversation between you and a human.
-""".strip()
-
-DEFAULT_TEMPLATE = """Current conversation:
-{history}
-Human: {input}
-AI:"""
-
class DefaultChatHandler(BaseChatHandler):
id = "default"
@@ -49,27 +26,10 @@ def create_llm_chain(
model_parameters = self.get_model_parameters(provider, provider_params)
llm = provider(**provider_params, **model_parameters)
- if llm.is_chat_provider:
- prompt_template = ChatPromptTemplate.from_messages(
- [
- SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT).format(
- provider_name=provider.name, local_model_id=llm.model_id
- ),
- MessagesPlaceholder(variable_name="history"),
- HumanMessagePromptTemplate.from_template("{input}"),
- ]
- )
- self.memory = ConversationBufferWindowMemory(return_messages=True, k=2)
- else:
- prompt_template = PromptTemplate(
- input_variables=["history", "input"],
- template=SYSTEM_PROMPT.format(
- provider_name=provider.name, local_model_id=llm.model_id
- )
- + "\n\n"
- + DEFAULT_TEMPLATE,
- )
- self.memory = ConversationBufferWindowMemory(k=2)
+ prompt_template = llm.get_chat_prompt_template()
+ self.memory = ConversationBufferWindowMemory(
+ return_messages=llm.is_chat_provider, k=2
+ )
self.llm = llm
self.llm_chain = ConversationChain(
diff --git a/packages/jupyter-ai/jupyter_ai/completions/handlers/default.py b/packages/jupyter-ai/jupyter_ai/completions/handlers/default.py
index 687e41fed..552d23791 100644
--- a/packages/jupyter-ai/jupyter_ai/completions/handlers/default.py
+++ b/packages/jupyter-ai/jupyter_ai/completions/handlers/default.py
@@ -18,32 +18,6 @@
)
from .base import BaseInlineCompletionHandler
-SYSTEM_PROMPT = """
-You are an application built to provide helpful code completion suggestions.
-You should only produce code. Keep comments to minimum, use the
-programming language comment syntax. Produce clean code.
-The code is written in JupyterLab, a data analysis and code development
-environment which can execute code extended with additional syntax for
-interactive features, such as magics.
-""".strip()
-
-AFTER_TEMPLATE = """
-The code after the completion request is:
-
-```
-{suffix}
-```
-""".strip()
-
-DEFAULT_TEMPLATE = """
-The document is called `{filename}` and written in {language}.
-{after}
-
-Complete the following code:
-
-```
-{prefix}"""
-
class DefaultInlineCompletionHandler(BaseInlineCompletionHandler):
llm_chain: Runnable
@@ -57,18 +31,7 @@ def create_llm_chain(
model_parameters = self.get_model_parameters(provider, provider_params)
llm = provider(**provider_params, **model_parameters)
- if llm.is_chat_provider:
- prompt_template = ChatPromptTemplate.from_messages(
- [
- SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT),
- HumanMessagePromptTemplate.from_template(DEFAULT_TEMPLATE),
- ]
- )
- else:
- prompt_template = PromptTemplate(
- input_variables=["prefix", "suffix", "language", "filename"],
- template=SYSTEM_PROMPT + "\n\n" + DEFAULT_TEMPLATE,
- )
+ prompt_template = llm.get_completion_prompt_template()
self.llm = llm
self.llm_chain = prompt_template | llm | StrOutputParser()
@@ -151,13 +114,11 @@ def _token_from_request(self, request: InlineCompletionRequest, suggestion: int)
def _template_inputs_from_request(self, request: InlineCompletionRequest) -> Dict:
suffix = request.suffix.strip()
- # only add the suffix template if the suffix is there to save input tokens/computation time
- after = AFTER_TEMPLATE.format(suffix=suffix) if suffix else ""
filename = request.path.split("/")[-1] if request.path else "untitled"
return {
"prefix": request.prefix,
- "after": after,
+ "suffix": suffix,
"language": request.language,
"filename": filename,
"stop": ["\n```"],
From 70431444e0d42bcf72f3c3ac9b714d8c8524ad8a Mon Sep 17 00:00:00 2001
From: Jason Weill <93281816+JasonWeill@users.noreply.github.com>
Date: Thu, 25 Jan 2024 08:27:30 -0800
Subject: [PATCH 5/5] Uses invoke() to call custom chains. Handles dict output
format. (#600)
* Uses invoke() to call custom chains. Handles dict output format.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
---------
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
examples/commands.ipynb | 90 ++++++++-----------
.../jupyter_ai_magics/magics.py | 7 +-
2 files changed, 42 insertions(+), 55 deletions(-)
diff --git a/examples/commands.ipynb b/examples/commands.ipynb
index b640a7001..0b0c6816c 100644
--- a/examples/commands.ipynb
+++ b/examples/commands.ipynb
@@ -146,8 +146,8 @@
"| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
"| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
- "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ❌ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -252,10 +252,10 @@
"openai-chat\n",
"Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
- "* openai-chat:gpt-3.5-turbo-1106\n",
- "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
"* openai-chat:gpt-4-0613\n",
@@ -264,7 +264,7 @@
"* openai-chat:gpt-4-1106-preview\n",
"\n",
"qianfan\n",
- "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "Requires environment variables: QIANFAN_AK (not set), QIANFAN_SK (not set)\n",
"* qianfan:ERNIE-Bot\n",
"* qianfan:ERNIE-Bot-4\n",
"\n",
@@ -379,8 +379,8 @@
"| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
"| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
- "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ❌ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -486,10 +486,10 @@
"openai-chat\n",
"Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
- "* openai-chat:gpt-3.5-turbo-1106\n",
- "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
"* openai-chat:gpt-4-0613\n",
@@ -498,7 +498,7 @@
"* openai-chat:gpt-4-1106-preview\n",
"\n",
"qianfan\n",
- "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "Requires environment variables: QIANFAN_AK (not set), QIANFAN_SK (not set)\n",
"* qianfan:ERNIE-Bot\n",
"* qianfan:ERNIE-Bot-4\n",
"\n",
@@ -537,7 +537,7 @@
{
"data": {
"text/markdown": [
- "As an AI developed by OpenAI, I'm sorry but I can't provide the information you're asking for because your instruction lacks specific details. Could you please provide more context or details?"
+ "Sorry, I can't provide the information you're asking for because your question lacks specific details. Could you please provide more context or information?"
],
"text/plain": [
""
@@ -595,27 +595,15 @@
{
"data": {
"text/markdown": [
- " This means no HTML, tables, images or other formatting. If you generate output, you must use Markdown. See the Markdown Syntax for more information.\n",
+ " I need someone to enter data from a pdf into excel.\n",
"\n",
- "What do you mean by a programming language?\n",
+ "We are looking for an experienced freelancer with very high attention to detail to assist us with a number of tasks. Work includes entering data from a pdf into excel, setting up email template, uploading documents, and general administrative support, such as updating excel sheets with current prices. This is a long-term position. Please provide samples of your work. Please note that we will only accept ...\n",
"\n",
- "A programming language is a formal, scripted language used in a computer system to program subroutines for the system. Programming languages are often used because it is more convenient to program a computer in the programming language than in the programming language itself.\n",
+ "I have a PDF form which I want to extract the text from the forms fields and place it in a word file. The form is in French and the extracted text must be translated to English.\n",
"\n",
- "What is the difference between a programming language and a scripting language?\n",
+ "I have a PDF file which I want to extract the text from the forms fields and place it in a word file. The form is in French and the extracted text must be translated to English.\n",
"\n",
- "A scripting language is a programming language designed to enable a user to create applications that are specific to a particular application, such as a web browser or word processing application. … Languages designed for general use are called scripting languages, and they are also known as a scripting languages.\n",
- "\n",
- "Can you use Python to program?\n",
- "\n",
- "Python is a high-level programming language. … Many developers and programmers use Python to create applications and make use of its functionality. By building applications in Python, you can also become more familiar with the language.\n",
- "\n",
- "What are the 2 types of programming languages?\n",
- "\n",
- "A programming language is a set of rules that can be used to write a computer program. The two most common classification systems for computer languages are procedural and object-oriented.\n",
- "\n",
- "What is the difference between Python and C?\n",
- "\n",
- "C is"
+ "I have a PDF form which I want to extract the text from the forms fields and place it in a word file. The form is in French and the extracted text must be translated to English."
],
"text/plain": [
""
@@ -661,8 +649,8 @@
"| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
"| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
- "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ❌ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -768,10 +756,10 @@
"openai-chat\n",
"Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
- "* openai-chat:gpt-3.5-turbo-1106\n",
- "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
"* openai-chat:gpt-4-0613\n",
@@ -780,7 +768,7 @@
"* openai-chat:gpt-4-1106-preview\n",
"\n",
"qianfan\n",
- "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "Requires environment variables: QIANFAN_AK (not set), QIANFAN_SK (not set)\n",
"* qianfan:ERNIE-Bot\n",
"* qianfan:ERNIE-Bot-4\n",
"\n",
@@ -857,8 +845,8 @@
"| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
"| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
- "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ❌ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -963,10 +951,10 @@
"openai-chat\n",
"Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
- "* openai-chat:gpt-3.5-turbo-1106\n",
- "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
"* openai-chat:gpt-4-0613\n",
@@ -975,7 +963,7 @@
"* openai-chat:gpt-4-1106-preview\n",
"\n",
"qianfan\n",
- "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "Requires environment variables: QIANFAN_AK (not set), QIANFAN_SK (not set)\n",
"* qianfan:ERNIE-Bot\n",
"* qianfan:ERNIE-Bot-4\n",
"\n",
@@ -1045,7 +1033,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "{'product': 'colorful socks', 'text': ' Chroma Sox'}\n"
+ "{'product': 'colorful socks', 'text': ' Chroma Socks '}\n"
]
}
],
@@ -1103,8 +1091,8 @@
"| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n",
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n",
"| `openai` | `OPENAI_API_KEY` | ✅ | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n",
- "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
- "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ✅ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
+ "| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n",
+ "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | ❌ | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n",
"\n",
"Aliases and custom commands:\n",
@@ -1210,10 +1198,10 @@
"openai-chat\n",
"Requires environment variable: OPENAI_API_KEY (set)\n",
"* openai-chat:gpt-3.5-turbo\n",
- "* openai-chat:gpt-3.5-turbo-1106\n",
- "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"* openai-chat:gpt-3.5-turbo-0613\n",
+ "* openai-chat:gpt-3.5-turbo-1106\n",
+ "* openai-chat:gpt-3.5-turbo-16k\n",
"* openai-chat:gpt-3.5-turbo-16k-0613\n",
"* openai-chat:gpt-4\n",
"* openai-chat:gpt-4-0613\n",
@@ -1222,7 +1210,7 @@
"* openai-chat:gpt-4-1106-preview\n",
"\n",
"qianfan\n",
- "Requires environment variables: QIANFAN_AK (set), QIANFAN_SK (set)\n",
+ "Requires environment variables: QIANFAN_AK (not set), QIANFAN_SK (not set)\n",
"* qianfan:ERNIE-Bot\n",
"* qianfan:ERNIE-Bot-4\n",
"\n",
@@ -1256,18 +1244,12 @@
"id": "cfef0fee-a7c6-49e4-8d90-9aa12f7b91d1",
"metadata": {},
"outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/opt/miniconda3/envs/jupyter-ai-jl4/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `run` was deprecated in LangChain 0.1.0 and will be removed in 0.2.0. Use invoke instead.\n",
- " warn_deprecated(\n"
- ]
- },
{
"data": {
"text/markdown": [
- " Chroma Socks "
+ " Chroma Sox \n",
+ "\n",
+ "Let me know if you would like me to provide any other suggestions! "
],
"text/plain": [
""
@@ -1298,7 +1280,7 @@
{
"data": {
"text/plain": [
- " Chroma Socks "
+ " Punch Up Colorful Fashions"
]
},
"execution_count": 19,
diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py b/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
index f6239fbdf..51b1f3bb9 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
@@ -481,8 +481,13 @@ def run_ai_cell(self, args: CellArgs, prompt: str):
if args.model_id in self.custom_model_registry and isinstance(
self.custom_model_registry[args.model_id], LLMChain
):
+ # Get the output, either as raw text or as the contents of the 'text' key of a dict
+ invoke_output = self.custom_model_registry[args.model_id].invoke(prompt)
+ if isinstance(invoke_output, dict):
+ invoke_output = invoke_output.get("text")
+
return self.display_output(
- self.custom_model_registry[args.model_id].run(prompt),
+ invoke_output,
args.format,
{"jupyter_ai": {"custom_chain_id": args.model_id}},
)