diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore
index b4db51d639de..6fc07ee76864 100644
--- a/docs/core_docs/.gitignore
+++ b/docs/core_docs/.gitignore
@@ -78,6 +78,8 @@ docs/how_to/tool_choice.md
docs/how_to/tool_choice.mdx
docs/how_to/tool_calls_multimodal.md
docs/how_to/tool_calls_multimodal.mdx
+docs/how_to/tool_calling_parallel.md
+docs/how_to/tool_calling_parallel.mdx
docs/how_to/tool_calling.md
docs/how_to/tool_calling.mdx
docs/how_to/tool_artifacts.md
@@ -210,6 +212,8 @@ docs/how_to/assign.md
docs/how_to/assign.mdx
docs/how_to/agent_executor.md
docs/how_to/agent_executor.mdx
+docs/integrations/toolkits/sql.md
+docs/integrations/toolkits/sql.mdx
docs/integrations/text_embedding/togetherai.md
docs/integrations/text_embedding/togetherai.mdx
docs/integrations/text_embedding/openai.md
@@ -222,6 +226,12 @@ docs/integrations/text_embedding/cohere.md
docs/integrations/text_embedding/cohere.mdx
docs/integrations/text_embedding/azure_openai.md
docs/integrations/text_embedding/azure_openai.mdx
+docs/integrations/stores/in_memory.md
+docs/integrations/stores/in_memory.mdx
+docs/integrations/stores/file_system.md
+docs/integrations/stores/file_system.mdx
+docs/integrations/retrievers/tavily.md
+docs/integrations/retrievers/tavily.mdx
docs/integrations/retrievers/kendra-retriever.md
docs/integrations/retrievers/kendra-retriever.mdx
docs/integrations/retrievers/exa.md
diff --git a/docs/core_docs/docs/how_to/chat_streaming.ipynb b/docs/core_docs/docs/how_to/chat_streaming.ipynb
index eea4dc036e9c..1fa1a98212ba 100644
--- a/docs/core_docs/docs/how_to/chat_streaming.ipynb
+++ b/docs/core_docs/docs/how_to/chat_streaming.ipynb
@@ -17,19 +17,17 @@
"source": [
"# How to stream chat model responses\n",
"\n",
- "All [chat models](https://v02.api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html) implement the [Runnable interface](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html), which comes with a **default** implementations of standard runnable methods (i.e. `invoke`, `batch`, `stream`, `streamEvents`).\n",
- "\n",
- "The **default** streaming implementation provides an `AsyncGenerator` that yields a single value: the final output from the underlying chat model provider.\n",
+ "All [chat models](https://api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html) implement the [Runnable interface](https://.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html), which comes with **default** implementations of standard runnable methods (i.e. `invoke`, `batch`, `stream`, `streamEvents`). This guide covers how to use these methods to stream output from chat models.\n",
"\n",
":::{.callout-tip}\n",
"\n",
- "The **default** implementation does **not** provide support for token-by-token streaming, but it ensures that the the model can be swapped in for any other model as it supports the same standard interface.\n",
+ "The **default** implementation does **not** provide support for token-by-token streaming, and will instead return an [`AsyncGenerator`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/AsyncGenerator) that will yield all model output in a single chunk. It exists to ensures that the the model can be swapped in for any other model as it supports the same standard interface.\n",
"\n",
- ":::\n",
+ "The ability to stream the output token-by-token depends on whether the provider has implemented token-by-token streaming support.\n",
"\n",
- "The ability to stream the output token-by-token depends on whether the provider has implemented proper streaming support.\n",
+ "You can see which [integrations support token-by-token streaming here](/docs/integrations/chat/).\n",
"\n",
- "See which [integrations support token-by-token streaming here](/docs/integrations/chat/)."
+ ":::"
]
},
{
@@ -56,7 +54,22 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 1,
+ "id": "2331cfb5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "// @lc-docs-hide-cell\n",
+ "import { ChatAnthropic } from \"@langchain/anthropic\";\n",
+ "\n",
+ "const model = new ChatAnthropic({\n",
+ " model: \"claude-3-5-sonnet-20240620\",\n",
+ "});"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
"id": "975c4f32-21f6-4a71-9091-f87b56347c33",
"metadata": {
"tags": []
@@ -68,44 +81,117 @@
"text": [
"\n",
"---\n",
- "Sw\n",
+ "Here's\n",
+ "---\n",
+ " a one\n",
+ "---\n",
+ "-\n",
+ "---\n",
+ "verse song about goldfish on\n",
+ "---\n",
+ " the moon:\n",
+ "\n",
+ "Verse\n",
+ "---\n",
+ ":\n",
+ "Swimming\n",
+ "---\n",
+ " through the stars\n",
"---\n",
- "imming\n",
+ ",\n",
"---\n",
" in\n",
"---\n",
- " a\n",
+ " a cosmic\n",
"---\n",
- " world\n",
+ " lag\n",
"---\n",
- " of\n",
+ "oon\n",
"---\n",
- " silver\n",
+ "\n",
+ "Little\n",
"---\n",
- " beams\n",
+ " golden\n",
+ "---\n",
+ " scales\n",
"---\n",
",\n",
+ "---\n",
+ " reflecting the moon\n",
+ "---\n",
+ "\n",
+ "No\n",
+ "---\n",
+ " gravity to\n",
+ "---\n",
+ " hold them,\n",
+ "---\n",
+ " they\n",
+ "---\n",
+ " float with\n",
+ "---\n",
+ " glee\n",
+ "Goldfish\n",
+ "---\n",
+ " astron\n",
+ "---\n",
+ "auts, on a lunar\n",
+ "---\n",
+ " sp\n",
+ "---\n",
+ "ree\n",
+ "---\n",
+ "\n",
+ "Bub\n",
+ "---\n",
+ "bles rise\n",
+ "---\n",
+ " like\n",
+ "---\n",
+ " com\n",
+ "---\n",
+ "ets, in the\n",
+ "---\n",
+ " star\n",
+ "---\n",
+ "ry night\n",
+ "---\n",
"\n",
+ "Their fins like\n",
+ "---\n",
+ " tiny\n",
+ "---\n",
+ " rockets, a\n",
"---\n",
- "Gold\n",
+ " w\n",
"---\n",
- "fish\n",
+ "ondrous sight\n",
+ "Who\n",
"---\n",
- " on\n",
+ " knew\n",
"---\n",
- " the\n",
+ " these\n",
"---\n",
- " moon\n",
+ " small\n",
+ "---\n",
+ " creatures\n",
"---\n",
",\n",
"---\n",
- " living\n",
+ " could con\n",
+ "---\n",
+ "quer space?\n",
+ "---\n",
+ "\n",
+ "Goldfish on the moon,\n",
"---\n",
- " their\n",
+ " with\n",
"---\n",
- " dreams\n",
+ " such\n",
"---\n",
- ".\n",
+ " fis\n",
+ "---\n",
+ "hy grace\n",
"---\n",
"\n",
"---\n",
@@ -118,8 +204,7 @@
"const stream = await model.stream(\"Write me a 1 verse song about goldfish on the moon\")\n",
"\n",
"for await (const chunk of stream) {\n",
- " console.log(`${chunk.content}\n",
- "---`);\n",
+ " console.log(`${chunk.content}\\n---`);\n",
"}"
]
},
@@ -130,150 +215,139 @@
"source": [
"## Stream events\n",
"\n",
- "Chat models also support the standard [streamEvents()](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#streamEvents) method.\n",
+ "Chat models also support the standard [`streamEvents()`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#streamEvents) method to stream more granular events from within chains.\n",
"\n",
- "This method is useful if you're streaming output from a larger LLM application that contains multiple steps (e.g., a chain composed of a prompt, chat model and parser)."
+ "This method is useful if you're streaming output from a larger LLM application that contains multiple steps (e.g., a chain composed of a prompt, chat model and parser):"
]
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 3,
"id": "27bd1dfd-8ae2-49d6-b526-97180c81b5f4",
"metadata": {
"tags": []
},
"outputs": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " event: 'on_chat_model_start',\n",
- " data: { input: 'Write me a 1 verse song about goldfish on the moon' },\n",
- " name: 'ChatOpenAI',\n",
- " tags: [],\n",
- " run_id: 'c9966059-70eb-4f24-9de3-2cf04320c8f6',\n",
- " metadata: {\n",
- " ls_provider: 'openai',\n",
- " ls_model_name: 'gpt-3.5-turbo',\n",
- " ls_model_type: 'chat',\n",
- " ls_temperature: 1,\n",
- " ls_max_tokens: undefined,\n",
- " ls_stop: undefined\n",
- " }\n",
- "}\n",
- "{\n",
- " event: 'on_chat_model_stream',\n",
- " data: {\n",
- " chunk: AIMessageChunk {\n",
- " lc_serializable: true,\n",
- " lc_kwargs: [Object],\n",
- " lc_namespace: [Array],\n",
- " content: '',\n",
- " name: undefined,\n",
- " additional_kwargs: {},\n",
- " response_metadata: [Object],\n",
- " id: 'chatcmpl-9lOQhe44ip2q0DHfr0eYU9TF4mHtu',\n",
- " tool_calls: [],\n",
- " invalid_tool_calls: [],\n",
- " tool_call_chunks: [],\n",
- " usage_metadata: undefined\n",
- " }\n",
- " },\n",
- " run_id: 'c9966059-70eb-4f24-9de3-2cf04320c8f6',\n",
- " name: 'ChatOpenAI',\n",
- " tags: [],\n",
- " metadata: {\n",
- " ls_provider: 'openai',\n",
- " ls_model_name: 'gpt-3.5-turbo',\n",
- " ls_model_type: 'chat',\n",
- " ls_temperature: 1,\n",
- " ls_max_tokens: undefined,\n",
- " ls_stop: undefined\n",
- " }\n",
- "}\n",
- "{\n",
- " event: 'on_chat_model_stream',\n",
- " run_id: 'c9966059-70eb-4f24-9de3-2cf04320c8f6',\n",
- " name: 'ChatOpenAI',\n",
- " tags: [],\n",
- " metadata: {\n",
- " ls_provider: 'openai',\n",
- " ls_model_name: 'gpt-3.5-turbo',\n",
- " ls_model_type: 'chat',\n",
- " ls_temperature: 1,\n",
- " ls_max_tokens: undefined,\n",
- " ls_stop: undefined\n",
- " },\n",
- " data: {\n",
- " chunk: AIMessageChunk {\n",
- " lc_serializable: true,\n",
- " lc_kwargs: [Object],\n",
- " lc_namespace: [Array],\n",
- " content: '',\n",
- " name: undefined,\n",
- " additional_kwargs: {},\n",
- " response_metadata: [Object],\n",
- " id: 'chatcmpl-9lOQhe44ip2q0DHfr0eYU9TF4mHtu',\n",
- " tool_calls: [],\n",
- " invalid_tool_calls: [],\n",
- " tool_call_chunks: [],\n",
- " usage_metadata: undefined\n",
- " }\n",
- " }\n",
- "}\n",
- "{\n",
- " event: 'on_chat_model_stream',\n",
- " data: {\n",
- " chunk: AIMessageChunk {\n",
- " lc_serializable: true,\n",
- " lc_kwargs: [Object],\n",
- " lc_namespace: [Array],\n",
- " content: 'Sw',\n",
- " name: undefined,\n",
- " additional_kwargs: {},\n",
- " response_metadata: [Object],\n",
- " id: 'chatcmpl-9lOQhe44ip2q0DHfr0eYU9TF4mHtu',\n",
- " tool_calls: [],\n",
- " invalid_tool_calls: [],\n",
- " tool_call_chunks: [],\n",
- " usage_metadata: undefined\n",
- " }\n",
- " },\n",
- " run_id: 'c9966059-70eb-4f24-9de3-2cf04320c8f6',\n",
- " name: 'ChatOpenAI',\n",
- " tags: [],\n",
- " metadata: {\n",
- " ls_provider: 'openai',\n",
- " ls_model_name: 'gpt-3.5-turbo',\n",
- " ls_model_type: 'chat',\n",
- " ls_temperature: 1,\n",
- " ls_max_tokens: undefined,\n",
- " ls_stop: undefined\n",
- " }\n",
- "}\n",
- "...Truncated\n"
- ]
+ "data": {
+ "text/plain": [
+ "[\n",
+ " {\n",
+ " event: \u001b[32m\"on_chat_model_start\"\u001b[39m,\n",
+ " data: { input: \u001b[32m\"Write me a 1 verse song about goldfish on the moon\"\u001b[39m },\n",
+ " name: \u001b[32m\"ChatAnthropic\"\u001b[39m,\n",
+ " tags: [],\n",
+ " run_id: \u001b[32m\"d60a87d6-acf0-4ae1-bf27-e570aa101960\"\u001b[39m,\n",
+ " metadata: {\n",
+ " ls_provider: \u001b[32m\"openai\"\u001b[39m,\n",
+ " ls_model_name: \u001b[32m\"claude-3-5-sonnet-20240620\"\u001b[39m,\n",
+ " ls_model_type: \u001b[32m\"chat\"\u001b[39m,\n",
+ " ls_temperature: \u001b[33m1\u001b[39m,\n",
+ " ls_max_tokens: \u001b[33m2048\u001b[39m,\n",
+ " ls_stop: \u001b[90mundefined\u001b[39m\n",
+ " }\n",
+ " },\n",
+ " {\n",
+ " event: \u001b[32m\"on_chat_model_stream\"\u001b[39m,\n",
+ " run_id: \u001b[32m\"d60a87d6-acf0-4ae1-bf27-e570aa101960\"\u001b[39m,\n",
+ " name: \u001b[32m\"ChatAnthropic\"\u001b[39m,\n",
+ " tags: [],\n",
+ " metadata: {\n",
+ " ls_provider: \u001b[32m\"openai\"\u001b[39m,\n",
+ " ls_model_name: \u001b[32m\"claude-3-5-sonnet-20240620\"\u001b[39m,\n",
+ " ls_model_type: \u001b[32m\"chat\"\u001b[39m,\n",
+ " ls_temperature: \u001b[33m1\u001b[39m,\n",
+ " ls_max_tokens: \u001b[33m2048\u001b[39m,\n",
+ " ls_stop: \u001b[90mundefined\u001b[39m\n",
+ " },\n",
+ " data: {\n",
+ " chunk: AIMessageChunk {\n",
+ " lc_serializable: \u001b[33mtrue\u001b[39m,\n",
+ " lc_kwargs: {\n",
+ " content: \u001b[32m\"\"\u001b[39m,\n",
+ " additional_kwargs: \u001b[36m[Object]\u001b[39m,\n",
+ " tool_calls: [],\n",
+ " invalid_tool_calls: [],\n",
+ " tool_call_chunks: [],\n",
+ " response_metadata: {}\n",
+ " },\n",
+ " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n",
+ " content: \u001b[32m\"\"\u001b[39m,\n",
+ " name: \u001b[90mundefined\u001b[39m,\n",
+ " additional_kwargs: {\n",
+ " id: \u001b[32m\"msg_01JaaH9ZUXg7bUnxzktypRak\"\u001b[39m,\n",
+ " type: \u001b[32m\"message\"\u001b[39m,\n",
+ " role: \u001b[32m\"assistant\"\u001b[39m,\n",
+ " model: \u001b[32m\"claude-3-5-sonnet-20240620\"\u001b[39m\n",
+ " },\n",
+ " response_metadata: {},\n",
+ " id: \u001b[90mundefined\u001b[39m,\n",
+ " tool_calls: [],\n",
+ " invalid_tool_calls: [],\n",
+ " tool_call_chunks: [],\n",
+ " usage_metadata: \u001b[90mundefined\u001b[39m\n",
+ " }\n",
+ " }\n",
+ " },\n",
+ " {\n",
+ " event: \u001b[32m\"on_chat_model_stream\"\u001b[39m,\n",
+ " run_id: \u001b[32m\"d60a87d6-acf0-4ae1-bf27-e570aa101960\"\u001b[39m,\n",
+ " name: \u001b[32m\"ChatAnthropic\"\u001b[39m,\n",
+ " tags: [],\n",
+ " metadata: {\n",
+ " ls_provider: \u001b[32m\"openai\"\u001b[39m,\n",
+ " ls_model_name: \u001b[32m\"claude-3-5-sonnet-20240620\"\u001b[39m,\n",
+ " ls_model_type: \u001b[32m\"chat\"\u001b[39m,\n",
+ " ls_temperature: \u001b[33m1\u001b[39m,\n",
+ " ls_max_tokens: \u001b[33m2048\u001b[39m,\n",
+ " ls_stop: \u001b[90mundefined\u001b[39m\n",
+ " },\n",
+ " data: {\n",
+ " chunk: AIMessageChunk {\n",
+ " lc_serializable: \u001b[33mtrue\u001b[39m,\n",
+ " lc_kwargs: {\n",
+ " content: \u001b[32m\"Here's\"\u001b[39m,\n",
+ " additional_kwargs: {},\n",
+ " tool_calls: [],\n",
+ " invalid_tool_calls: [],\n",
+ " tool_call_chunks: [],\n",
+ " response_metadata: {}\n",
+ " },\n",
+ " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n",
+ " content: \u001b[32m\"Here's\"\u001b[39m,\n",
+ " name: \u001b[90mundefined\u001b[39m,\n",
+ " additional_kwargs: {},\n",
+ " response_metadata: {},\n",
+ " id: \u001b[90mundefined\u001b[39m,\n",
+ " tool_calls: [],\n",
+ " invalid_tool_calls: [],\n",
+ " tool_call_chunks: [],\n",
+ " usage_metadata: \u001b[90mundefined\u001b[39m\n",
+ " }\n",
+ " }\n",
+ " }\n",
+ "]"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
}
],
"source": [
- "let idx = 0\n",
- "\n",
- "const stream = model.streamEvents(\n",
- " \"Write me a 1 verse song about goldfish on the moon\",\n",
- " {\n",
- " version: \"v2\"\n",
- " }\n",
+ "const eventStream = await model.streamEvents(\n",
+ " \"Write me a 1 verse song about goldfish on the moon\",\n",
+ " {\n",
+ " version: \"v2\"\n",
+ " },\n",
");\n",
"\n",
- "for await (const event of stream) {\n",
- " idx += 1\n",
- " if (idx === 5) {\n",
- " console.log(\"...Truncated\");\n",
- " break;\n",
- " } \n",
- " console.log(event);\n",
- "}"
+ "const events = [];\n",
+ "for await (const event of eventStream) {\n",
+ " events.push(event);\n",
+ "}\n",
+ "\n",
+ "events.slice(0, 3);"
]
},
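+  {
+   "cell_type": "markdown",
+   "id": "7a2f9c41",
+   "metadata": {},
+   "source": [
+    "Each event is a tagged object, so you can filter the stream for just the token chunks. Here's a minimal sketch, assuming the same `model` as above:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8b3d0e52",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "// A minimal sketch: only log token chunks emitted by the chat model itself.\n",
+    "const filteredEventStream = await model.streamEvents(\n",
+    "  \"Write me a 1 verse song about goldfish on the moon\",\n",
+    "  {\n",
+    "    version: \"v2\"\n",
+    "  },\n",
+    ");\n",
+    "\n",
+    "for await (const event of filteredEventStream) {\n",
+    "  if (event.event === \"on_chat_model_stream\") {\n",
+    "    console.log(event.data.chunk.content);\n",
+    "  }\n",
+    "}"
+   ]
+  },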
{
@@ -291,20 +365,17 @@
],
"metadata": {
"kernelspec": {
- "display_name": "TypeScript",
+ "display_name": "Deno",
"language": "typescript",
- "name": "tslab"
+ "name": "deno"
},
"language_info": {
- "codemirror_mode": {
- "mode": "typescript",
- "name": "javascript",
- "typescript": true
- },
"file_extension": ".ts",
- "mimetype": "text/typescript",
+ "mimetype": "text/x.typescript",
"name": "typescript",
- "version": "3.7.2"
+ "nb_converter": "script",
+ "pygments_lexer": "typescript",
+ "version": "5.3.3"
}
},
"nbformat": 4,
diff --git a/docs/core_docs/docs/how_to/index.mdx b/docs/core_docs/docs/how_to/index.mdx
index 0f4af5b3c37b..77587c33ce60 100644
--- a/docs/core_docs/docs/how_to/index.mdx
+++ b/docs/core_docs/docs/how_to/index.mdx
@@ -78,6 +78,7 @@ These are the core building blocks you can use when building applications.
- [How to: stream tool calls](/docs/how_to/tool_streaming)
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
- [How to: force a specific tool call](/docs/how_to/tool_choice)
+- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel/)
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
### Messages
@@ -171,6 +172,8 @@ LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to p
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
- [How to: handle tool errors](/docs/how_to/tools_error)
+- [How to: force a specific tool call](/docs/how_to/tool_choice/)
+- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel/)
- [How to: access the `RunnableConfig` object within a custom tool](/docs/how_to/tool_configure)
- [How to: stream events from child runs within a custom tool](/docs/how_to/tool_stream_events)
- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts)
diff --git a/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb b/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb
new file mode 100644
index 000000000000..bbe895997c7d
--- /dev/null
+++ b/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb
@@ -0,0 +1,223 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# How to disable parallel tool calling\n",
+ "\n",
+ "```{=mdx}\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "\n",
+ "- [LangChain Tools](/docs/concepts/#tools)\n",
+ "- [Tool calling](/docs/concepts/#functiontool-calling)\n",
+ "- [Custom tools](/docs/how_to/custom_tools)\n",
+ "\n",
+ ":::\n",
+ "```\n",
+ "\n",
+ ":::info OpenAI-specific\n",
+ "\n",
+ "This API is currently only supported by OpenAI.\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "OpenAI models perform tool calling in parallel by default. That means that if we ask a question like `\"What is the weather in Tokyo, New York, and Chicago?\"` and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the `parallel_tool_call` call option."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First let's set up our tools and model:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { ChatOpenAI } from \"@langchain/openai\";\n",
+ "import { z } from \"zod\";\n",
+ "import { tool } from \"@langchain/core/tools\";\n",
+ "\n",
+ "const adderTool = tool(async ({ a, b }) => {\n",
+ " return a + b;\n",
+ "}, {\n",
+ " name: \"add\",\n",
+ " description: \"Adds a and b\",\n",
+ " schema: z.object({\n",
+ " a: z.number(),\n",
+ " b: z.number(),\n",
+ " })\n",
+ "});\n",
+ "\n",
+ "const multiplyTool = tool(async ({ a, b }) => {\n",
+ " return a + b;\n",
+ "}, {\n",
+ " name: \"multiply\",\n",
+ " description: \"Multiplies a and b\",\n",
+ " schema: z.object({\n",
+ " a: z.number(),\n",
+ " b: z.number(),\n",
+ " })\n",
+ "});\n",
+ "\n",
+ "const tools = [adderTool, multiplyTool];\n",
+ "\n",
+ "const llm = new ChatOpenAI({\n",
+ " model: \"gpt-4o-mini\",\n",
+ " temperature: 0,\n",
+ "});"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's show a quick example of how disabling parallel tool calls work:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[\n",
+ " {\n",
+ " name: 'add',\n",
+ " args: { a: 5, b: 3 },\n",
+ " type: 'tool_call',\n",
+ " id: 'call_5bKOYerdQU6J5ERJJYnzYsGn'\n",
+ " }\n",
+ "]\n"
+ ]
+ }
+ ],
+ "source": [
+ "const llmWithTools = llm.bindTools(tools, { parallel_tool_calls: false });\n",
+ "\n",
+ "const result = await llmWithTools.invoke(\"Please call the first tool two times\");\n",
+ "\n",
+ "result.tool_calls;"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As we can see, even though we explicitly told the model to call a tool twice, by disabling parallel tool calls the model was constrained to only calling one.\n",
+ "\n",
+ "Compare this to calling the model without passing `parallel_tool_calls` as false:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[\n",
+ " {\n",
+ " name: 'add',\n",
+ " args: { a: 1, b: 2 },\n",
+ " type: 'tool_call',\n",
+ " id: 'call_Ni0tF0nNtY66BBwB5vEP6oI4'\n",
+ " },\n",
+ " {\n",
+ " name: 'add',\n",
+ " args: { a: 3, b: 4 },\n",
+ " type: 'tool_call',\n",
+ " id: 'call_XucnTCfFqP1JBs3LtbOq5w3d'\n",
+ " }\n",
+ "]\n"
+ ]
+ }
+ ],
+ "source": [
+ "const llmWithNoBoundParam = llm.bindTools(tools);\n",
+ "\n",
+ "const result2 = await llmWithNoBoundParam.invoke(\"Please call the first tool two times\");\n",
+ "\n",
+ "result2.tool_calls;"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can see that you get two tool calls.\n",
+ "\n",
+ "You can also pass the parameter in at runtime like this:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[\n",
+ " {\n",
+ " name: 'add',\n",
+ " args: { a: 1, b: 2 },\n",
+ " type: 'tool_call',\n",
+ " id: 'call_TWo6auul71NUg1p0suzBKARt'\n",
+ " }\n",
+ "]\n"
+ ]
+ }
+ ],
+ "source": [
+ "const result3 = await llmWithNoBoundParam.invoke(\"Please call the first tool two times\", {\n",
+ " parallel_tool_calls: false,\n",
+ "});\n",
+ "\n",
+ "result3.tool_calls;"
+ ]
+ },
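+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once you have the single tool call back, you can execute it. As a minimal sketch (assuming the `adderTool` defined above), tools created with the `tool()` helper accept a tool call object directly and return a `ToolMessage`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "// A minimal sketch: execute the single tool call returned above.\n",
+    "// Assumes the `adderTool` defined earlier in this guide.\n",
+    "const toolMessage = await adderTool.invoke(result3.tool_calls[0]);\n",
+    "\n",
+    "toolMessage.content;"
+   ]
+  },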
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Related\n",
+ "\n",
+ "- [How to: create custom tools](/docs/how_to/custom_tools)\n",
+ "- [How to: pass run time values to tools](/docs/how_to/tool_runtime)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "TypeScript",
+ "language": "typescript",
+ "name": "tslab"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "mode": "typescript",
+ "name": "javascript",
+ "typescript": true
+ },
+ "file_extension": ".ts",
+ "mimetype": "text/typescript",
+ "name": "typescript",
+ "version": "3.7.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx b/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx
index 2f19f3f9a4aa..e07db15def43 100644
--- a/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx
+++ b/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx
@@ -29,3 +29,8 @@ Here's an example:
import Tongyi from "@examples/models/chat/integration_alitongyi.ts";
{Tongyi}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/anthropic.ipynb b/docs/core_docs/docs/integrations/chat/anthropic.ipynb
index b3fce19f740f..e6138f826d0d 100644
--- a/docs/core_docs/docs/integrations/chat/anthropic.ipynb
+++ b/docs/core_docs/docs/integrations/chat/anthropic.ipynb
@@ -33,6 +33,9 @@
"| [ChatAnthropic](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html) | [@langchain/anthropic](https://api.js.langchain.com/modules/langchain_anthropic.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/anthropic?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/anthropic?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n",
diff --git a/docs/core_docs/docs/integrations/chat/anthropic_tools.mdx b/docs/core_docs/docs/integrations/chat/anthropic_tools.mdx
index 51d96c38c0ad..2dd8c57d3686 100644
--- a/docs/core_docs/docs/integrations/chat/anthropic_tools.mdx
+++ b/docs/core_docs/docs/integrations/chat/anthropic_tools.mdx
@@ -75,3 +75,8 @@ import WSAExample from "@examples/models/chat/integration_anthropic_tools_wsa.ts
import WSAJSONExample from "@examples/models/chat/integration_anthropic_tools_wsa_json.ts";
{WSAJSONExample}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/azure.ipynb b/docs/core_docs/docs/integrations/chat/azure.ipynb
index dbae00d112e2..d48eaf98dc94 100644
--- a/docs/core_docs/docs/integrations/chat/azure.ipynb
+++ b/docs/core_docs/docs/integrations/chat/azure.ipynb
@@ -21,16 +21,21 @@
"source": [
"# AzureChatOpenAI\n",
"\n",
+ "[Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/) is a Microsoft Azure service that provides powerful language models from OpenAI.\n",
+ "\n",
"This will help you getting started with AzureChatOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
- "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/v0.2/docs/integrations/chat/azure) | Package downloads | Package latest |\n",
+ "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/v0.2/docs/integrations/chat/azure_chat_openai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [AzureChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n",
diff --git a/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx b/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx
index 350eb04b3444..a4df5ccfa63a 100644
--- a/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx
+++ b/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx
@@ -37,3 +37,8 @@ Qianfan's API also supports streaming token responses. The example below demonst
import ChatBaiduQianfanStreamExample from "@examples/models/chat/chat_stream_baidu_qianfan.ts";
{ChatBaiduQianfanStreamExample}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx b/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx
index 580b3011c59f..3a36bc738673 100644
--- a/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx
+++ b/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx
@@ -32,3 +32,8 @@ Abandoned models: `ERNIE-Bot-turbo`
import Wenxin from "@examples/models/chat/integration_baiduwenxin.ts";
{Wenxin}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/bedrock.mdx b/docs/core_docs/docs/integrations/chat/bedrock.mdx
index 49c291da8fbc..3107ac1a0c3c 100644
--- a/docs/core_docs/docs/integrations/chat/bedrock.mdx
+++ b/docs/core_docs/docs/integrations/chat/bedrock.mdx
@@ -88,3 +88,8 @@ import WSOExample from "@examples/models/chat/integration_bedrock_wso.ts";
:::tip
See the LangSmith trace [here](https://smith.langchain.com/public/1f7b1ad8-e4ac-4965-8ce1-fae06005f3d7/r)
:::
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/bedrock_converse.mdx b/docs/core_docs/docs/integrations/chat/bedrock_converse.mdx
index 2b82a67898b6..8288fa8a636d 100644
--- a/docs/core_docs/docs/integrations/chat/bedrock_converse.mdx
+++ b/docs/core_docs/docs/integrations/chat/bedrock_converse.mdx
@@ -75,3 +75,8 @@ import WSOExample from "@examples/models/chat/integration_bedrock_wso_converse.t
:::tip
See the LangSmith trace [here](https://smith.langchain.com/public/982940c4-5f96-4168-80c9-99102c3e073a/r)
:::
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/cloudflare_workersai.mdx b/docs/core_docs/docs/integrations/chat/cloudflare_workersai.mdx
index ec39480b42ca..924cbd0912de 100644
--- a/docs/core_docs/docs/integrations/chat/cloudflare_workersai.mdx
+++ b/docs/core_docs/docs/integrations/chat/cloudflare_workersai.mdx
@@ -26,3 +26,8 @@ import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tool
{Example}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/cohere.ipynb b/docs/core_docs/docs/integrations/chat/cohere.ipynb
index 5d8eee36f4dd..e42647e5b644 100644
--- a/docs/core_docs/docs/integrations/chat/cohere.ipynb
+++ b/docs/core_docs/docs/integrations/chat/cohere.ipynb
@@ -21,7 +21,9 @@
"source": [
"# ChatCohere\n",
"\n",
- "This will help you getting started with ChatCohere [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatCohere features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html).\n",
+ "[Cohere](https://cohere.com/) is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions.\n",
+ "\n",
+ "This will help you getting started with Cohere [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatCohere` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
@@ -31,6 +33,9 @@
"| [ChatCohere](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | \n",
@@ -358,206 +363,6 @@
")"
]
},
- {
- "cell_type": "markdown",
- "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
- "metadata": {},
- "source": [
- "## Streaming\n",
- "\n",
- "Cohere's API also supports streaming token responses. The example below demonstrates how to use this feature."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "4e1fe6b2",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "stream tokens: The sky appears blue to human observers because blue light from the sun is scattered in all directions by the gases and particles in the Earth's atmosphere. This process is called Rayleigh scattering.\n",
- "stream iters: 38\n"
- ]
- }
- ],
- "source": [
- "import { ChatCohere } from \"@langchain/cohere\";\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
- "\n",
- "const streamingLLM = new ChatCohere({\n",
- " apiKey: process.env.COHERE_API_KEY, // Default\n",
- "});\n",
- "const promptForStreaming = ChatPromptTemplate.fromMessages([\n",
- " [\"ai\", \"You are a helpful assistant\"],\n",
- " [\"human\", \"{input}\"],\n",
- "]);\n",
- "const outputParserForStreaming = new StringOutputParser();\n",
- "const chainForStreaming = promptForStreaming.pipe(streamingLLM).pipe(outputParserForStreaming);\n",
- "const streamRes = await chainForStreaming.stream({\n",
- " input: \"Why is the sky blue? Be concise with your answer.\",\n",
- "});\n",
- "let streamTokens = \"\";\n",
- "let streamIters = 0;\n",
- "for await (const streamChunk of streamRes) {\n",
- " streamTokens += streamChunk;\n",
- " streamIters += 1;\n",
- "}\n",
- "console.log(\"stream tokens:\", streamTokens);\n",
- "console.log(\"stream iters:\", streamIters);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1f756341",
- "metadata": {},
- "source": [
- "## Tools\n",
- "\n",
- "The Cohere API supports tool calling, along with multi-hop-tool calling. The following example demonstrates how to call tools:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "b285e783",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "AIMessage {\n",
- " \"content\": \"I will use the magic_function tool to answer this question.\",\n",
- " \"additional_kwargs\": {\n",
- " \"response_id\": \"be5be048-94b5-4fb6-a5fb-5ed321b87120\",\n",
- " \"generationId\": \"ea2a1e0b-9eea-4c5b-a77b-5e34226ffe80\",\n",
- " \"chatHistory\": [\n",
- " {\n",
- " \"role\": \"USER\",\n",
- " \"message\": \"What is the magic function of number 5?\"\n",
- " },\n",
- " {\n",
- " \"role\": \"CHATBOT\",\n",
- " \"message\": \"I will use the magic_function tool to answer this question.\",\n",
- " \"toolCalls\": \"[Array]\"\n",
- " }\n",
- " ],\n",
- " \"finishReason\": \"COMPLETE\",\n",
- " \"meta\": {\n",
- " \"apiVersion\": {\n",
- " \"version\": \"1\"\n",
- " },\n",
- " \"billedUnits\": {\n",
- " \"inputTokens\": 30,\n",
- " \"outputTokens\": 21\n",
- " },\n",
- " \"tokens\": {\n",
- " \"inputTokens\": 904,\n",
- " \"outputTokens\": 54\n",
- " }\n",
- " },\n",
- " \"toolCalls\": [\n",
- " {\n",
- " \"id\": \"eafa35bd-7e2c-4601-a754-15219be4\",\n",
- " \"function\": \"[Object]\",\n",
- " \"type\": \"function\"\n",
- " }\n",
- " ]\n",
- " },\n",
- " \"response_metadata\": {\n",
- " \"estimatedTokenUsage\": {\n",
- " \"completionTokens\": 54,\n",
- " \"promptTokens\": 904,\n",
- " \"totalTokens\": 958\n",
- " },\n",
- " \"response_id\": \"be5be048-94b5-4fb6-a5fb-5ed321b87120\",\n",
- " \"generationId\": \"ea2a1e0b-9eea-4c5b-a77b-5e34226ffe80\",\n",
- " \"chatHistory\": [\n",
- " {\n",
- " \"role\": \"USER\",\n",
- " \"message\": \"What is the magic function of number 5?\"\n",
- " },\n",
- " {\n",
- " \"role\": \"CHATBOT\",\n",
- " \"message\": \"I will use the magic_function tool to answer this question.\",\n",
- " \"toolCalls\": \"[Array]\"\n",
- " }\n",
- " ],\n",
- " \"finishReason\": \"COMPLETE\",\n",
- " \"meta\": {\n",
- " \"apiVersion\": {\n",
- " \"version\": \"1\"\n",
- " },\n",
- " \"billedUnits\": {\n",
- " \"inputTokens\": 30,\n",
- " \"outputTokens\": 21\n",
- " },\n",
- " \"tokens\": {\n",
- " \"inputTokens\": 904,\n",
- " \"outputTokens\": 54\n",
- " }\n",
- " },\n",
- " \"toolCalls\": [\n",
- " {\n",
- " \"id\": \"eafa35bd-7e2c-4601-a754-15219be4\",\n",
- " \"function\": \"[Object]\",\n",
- " \"type\": \"function\"\n",
- " }\n",
- " ]\n",
- " },\n",
- " \"tool_calls\": [\n",
- " {\n",
- " \"name\": \"magic_function\",\n",
- " \"args\": {\n",
- " \"num\": 5\n",
- " },\n",
- " \"id\": \"eafa35bd-7e2c-4601-a754-15219be4\",\n",
- " \"type\": \"tool_call\"\n",
- " }\n",
- " ],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 904,\n",
- " \"output_tokens\": 54,\n",
- " \"total_tokens\": 958\n",
- " }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "import { ChatCohere } from \"@langchain/cohere\";\n",
- "import { HumanMessage } from \"@langchain/core/messages\";\n",
- "import { z } from \"zod\";\n",
- "import { tool } from \"@langchain/core/tools\";\n",
- "\n",
- "const llmForTools = new ChatCohere({\n",
- " apiKey: process.env.COHERE_API_KEY, // Default\n",
- "});\n",
- "\n",
- "const magicFunctionTool = tool(\n",
- " async ({ num }) => {\n",
- " return `The magic function of ${num} is ${num + 5}`;\n",
- " },\n",
- " {\n",
- " name: \"magic_function\",\n",
- " description: \"Apply a magic function to the input number\",\n",
- " schema: z.object({\n",
- " num: z.number().describe(\"The number to apply the magic function for\"),\n",
- " }),\n",
- " }\n",
- ");\n",
- "\n",
- "const llmWithTools = llmForTools.bindTools([magicFunctionTool]);\n",
- "const responseWithTools = await llmWithTools.invoke([new HumanMessage(\"What is the magic function of number 5?\")]);\n",
- "\n",
- "console.log(responseWithTools);"
- ]
- },
{
"cell_type": "markdown",
"id": "4fecf4e4",
diff --git a/docs/core_docs/docs/integrations/chat/deep_infra.mdx b/docs/core_docs/docs/integrations/chat/deep_infra.mdx
index 5e5805c84bf9..45eb8511c1e8 100644
--- a/docs/core_docs/docs/integrations/chat/deep_infra.mdx
+++ b/docs/core_docs/docs/integrations/chat/deep_infra.mdx
@@ -23,3 +23,8 @@ You'll need to obtain an API key and set it as an environment variable named `DE
import Example from "@examples/models/chat/integration_deepinfra.ts";
{Example}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/fake.mdx b/docs/core_docs/docs/integrations/chat/fake.mdx
index f9cc1b75b6bc..6f6470f04b7b 100644
--- a/docs/core_docs/docs/integrations/chat/fake.mdx
+++ b/docs/core_docs/docs/integrations/chat/fake.mdx
@@ -8,3 +8,8 @@ import CodeBlock from "@theme/CodeBlock";
import FakeListChatExample from "@examples/models/chat/integration_fake.ts";
{FakeListChatExample}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/fireworks.ipynb b/docs/core_docs/docs/integrations/chat/fireworks.ipynb
index aaaf3e03e75a..5dc46e93ce77 100644
--- a/docs/core_docs/docs/integrations/chat/fireworks.ipynb
+++ b/docs/core_docs/docs/integrations/chat/fireworks.ipynb
@@ -21,7 +21,9 @@
"source": [
"# ChatFireworks\n",
"\n",
- "This will help you getting started with `ChatFireworks` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatFireworks` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html).\n",
+ "[Fireworks AI](https://fireworks.ai/) is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n",
+ "\n",
+ "This guide will help you getting started with `ChatFireworks` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatFireworks` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
@@ -31,6 +33,9 @@
"| [ChatFireworks](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n",
diff --git a/docs/core_docs/docs/integrations/chat/friendli.mdx b/docs/core_docs/docs/integrations/chat/friendli.mdx
index 32a64c3db307..4b1008bc749b 100644
--- a/docs/core_docs/docs/integrations/chat/friendli.mdx
+++ b/docs/core_docs/docs/integrations/chat/friendli.mdx
@@ -27,3 +27,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/models/chat/friendli.ts";
{Example}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb
index 192339ddce01..e46ecd104ceb 100644
--- a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb
+++ b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb
@@ -1,7 +1,7 @@
{
"cells": [
{
- "cell_type": "markdown",
+ "cell_type": "raw",
"id": "46f7ac07",
"metadata": {},
"source": [
@@ -18,6 +18,8 @@
"source": [
"# ChatGoogleGenerativeAI\n",
"\n",
+ "[Google AI](https://ai.google.dev/) offers a number of different chat models, including the powerful Gemini series. For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini).\n",
+ "\n",
"This will help you getting started with `ChatGoogleGenerativeAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatGoogleGenerativeAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html).\n",
"\n",
"## Overview\n",
@@ -28,6 +30,9 @@
"| [ChatGoogleGenerativeAI](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html) | [@langchain/google-genai](https://api.js.langchain.com/modules/langchain_google_genai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-genai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-genai?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n",
@@ -50,7 +55,7 @@
"\n",
"### Credentials\n",
"\n",
- "Get an API key here: [ai.google.dev/tutorials/setup](https://ai.google.dev/tutorials/setup)\n",
+ "Get an API key here: [https://ai.google.dev/tutorials/setup](https://ai.google.dev/tutorials/setup)\n",
"\n",
"Then set the `GOOGLE_API_KEY` environment variable:\n",
"\n",
@@ -67,7 +72,7 @@
"\n",
"### Installation\n",
"\n",
- "The LangChain ChatGoogleGenerativeAI integration lives in the `@langchain/google-genai` package:\n",
+ "The LangChain `ChatGoogleGenerativeAI` integration lives in the `@langchain/google-genai` package:\n",
"\n",
"```{=mdx}\n",
"import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
@@ -318,18 +323,47 @@
},
{
"cell_type": "markdown",
- "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
+ "id": "6a44de56",
"metadata": {},
"source": [
- "## Tool calling\n",
+ "## Safety Settings\n",
"\n",
- "```{=mdx}\n",
+ "Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the safety_settings attribute of the model. For example, to turn off safety blocking for dangerous content, you can import enums from the `@google/generative-ai` package, then construct your LLM as follows:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "92db2f25",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n",
+ "import { HarmBlockThreshold, HarmCategory } from \"@google/generative-ai\";\n",
"\n",
- ":::caution\n",
+ "const llmWithSafetySettings = new ChatGoogleGenerativeAI({\n",
+ " model: \"gemini-1.5-pro\",\n",
+ " temperature: 0,\n",
+ " safetySettings: [\n",
+ " {\n",
+ " category: HarmCategory.HARM_CATEGORY_HARASSMENT,\n",
+ " threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,\n",
+ " },\n",
+ " ],\n",
+ " // other params...\n",
+ "});"
+ ]
+ },
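+  {
+   "cell_type": "markdown",
+   "id": "b6e3f8a2",
+   "metadata": {},
+   "source": [
+    "You can then use `llmWithSafetySettings` like any other chat model, e.g. `await llmWithSafetySettings.invoke(\"Tell me a joke\")`. Responses that trip a configured threshold may be blocked by the API."
+   ]
+  },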
+ {
+ "cell_type": "markdown",
+ "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
+ "metadata": {},
+ "source": [
+ "## Tool calling\n",
"\n",
- "The Google GenerativeAI API does not allow tool schemas to contain an object with unknown properties.\n",
+ "Tool calling with Google AI is mostly the same [as tool calling with other models](/docs/how_to/tool_calling), but has a few restrictions on schema.\n",
"\n",
- "For example, the following Zod schemas will throw an error:\n",
+ "The Google AI API does not allow tool schemas to contain an object with unknown properties. For example, the following Zod schemas will throw an error:\n",
"\n",
"`const invalidSchema = z.object({ properties: z.record(z.unknown()) });`\n",
"\n",
@@ -337,11 +371,7 @@
"\n",
"`const invalidSchema2 = z.record(z.unknown());`\n",
"\n",
- "Instead, you should explicitly define the properties of the object field.\n",
- "\n",
- ":::\n",
- "\n",
- "```\n"
+ "Instead, you should explicitly define the properties of the object field. Here's an example:"
]
},
{
@@ -398,173 +428,6 @@
"console.log(toolRes.tool_calls);"
]
},
- {
- "cell_type": "markdown",
- "id": "83061805",
- "metadata": {},
- "source": [
- "### `.withStructuredOutput`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "ef24448c",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " url: 'https://www.accuweather.com/en/us/new-york-ny/10007/current-weather/349333',\n",
- " query: 'weather tonight'\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n",
- "import { z } from \"zod\";\n",
- "\n",
- "// Define your model\n",
- "const llmForWSO = new ChatGoogleGenerativeAI({\n",
- " model: \"gemini-pro\",\n",
- "});\n",
- "\n",
- "const browserSchema = z.object({\n",
- " url: z.string().describe(\"The URL of the webpage to search.\"),\n",
- " query: z.string().optional().describe(\"An optional search query to use.\"),\n",
- "});\n",
- "\n",
- "const llmWithStructuredOutput = llmForWSO.withStructuredOutput(browserSchema, {\n",
- " name: \"browser_tool\"\n",
- "})\n",
- "\n",
- "const structuredOutputRes = await llmWithStructuredOutput.invoke([\n",
- " [\n",
- " \"human\",\n",
- " \"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website\",\n",
- " ],\n",
- "]);\n",
- "\n",
- "console.log(structuredOutputRes);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3987f0cb",
- "metadata": {},
- "source": [
- "## Multimodal support\n",
- "\n",
- "To provide an image, pass a human message with a `content` field set to an array of content objects. Each content object\n",
- "where each dict contains either an image value (type of image_url) or a text (type of text) value. The value of image_url must be a base64\n",
- "encoded image (e.g., data:image/png;base64,abcd124):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "0b60fc5d",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "AIMessage {\n",
- " \"content\": \"The image shows a hot dog in a bun, isolated against a white background. The hot dog is grilled and has a slightly crispy texture. The bun is soft and fluffy, and it appears to be lightly toasted. The hot dog is positioned horizontally, with the bun covering most of the sausage. The image captures the classic American snack food, highlighting its simplicity and appeal.\",\n",
- " \"additional_kwargs\": {\n",
- " \"finishReason\": \"STOP\",\n",
- " \"index\": 0,\n",
- " \"safetyRatings\": [\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " },\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " },\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " },\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " }\n",
- " ]\n",
- " },\n",
- " \"response_metadata\": {\n",
- " \"finishReason\": \"STOP\",\n",
- " \"index\": 0,\n",
- " \"safetyRatings\": [\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " },\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " },\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " },\n",
- " {\n",
- " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n",
- " \"probability\": \"NEGLIGIBLE\"\n",
- " }\n",
- " ]\n",
- " },\n",
- " \"tool_calls\": [],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 264,\n",
- " \"output_tokens\": 74,\n",
- " \"total_tokens\": 338\n",
- " }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "import fs from \"fs\";\n",
- "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "\n",
- "// Multi-modal\n",
- "const llmWithVisionModel = new ChatGoogleGenerativeAI({\n",
- " model: \"gemini-1.5-flash\",\n",
- " maxOutputTokens: 2048,\n",
- " maxRetries: 1,\n",
- "});\n",
- "const image = fs.readFileSync(\"../../../../../examples/hotdog.jpg\").toString(\"base64\");\n",
- "const visionPrompt = ChatPromptTemplate.fromMessages([\n",
- " [\n",
- " \"human\", \n",
- " [\n",
- " {\n",
- " type: \"text\",\n",
- " text: \"Describe the following image.\",\n",
- " },\n",
- " {\n",
- " type: \"image_url\",\n",
- " image_url: \"data:image/png;base64,{image}\",\n",
- " },\n",
- " ]\n",
- " ]\n",
- "])\n",
- "\n",
- "const visionRes = await visionPrompt.pipe(llmWithVisionModel).invoke({\n",
- " image,\n",
- "});\n",
- "\n",
- "console.log(visionRes);"
- ]
- },
{
"cell_type": "markdown",
"id": "0c6a950f",
diff --git a/docs/core_docs/docs/integrations/chat/google_palm.mdx b/docs/core_docs/docs/integrations/chat/google_palm.mdx
index 981d6c4a8129..2bc7731744a8 100644
--- a/docs/core_docs/docs/integrations/chat/google_palm.mdx
+++ b/docs/core_docs/docs/integrations/chat/google_palm.mdx
@@ -131,3 +131,8 @@ looks like.
import ChatGoogleVertexAIExamples from "@examples/models/chat/integration_googlevertexai-examples_legacy.ts";
{ChatGoogleVertexAIExamples}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb
index 68f96df67e16..f52175843730 100644
--- a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb
+++ b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb
@@ -21,6 +21,8 @@
"source": [
"# ChatVertexAI\n",
"\n",
+ "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit VertexAI documentation.\n",
+ "\n",
"This will help you getting started with `ChatVertexAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatVertexAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html).\n",
"\n",
"## Overview\n",
@@ -35,6 +37,9 @@
"| [ChatVertexAI](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html) | [@langchain/google-vertexai](https://api.js.langchain.com/modules/langchain_google_vertexai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n",
@@ -45,7 +50,7 @@
"\n",
"### Credentials\n",
"\n",
- "Head to GCP and generate a credentials file. Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n",
+ "Head to your [GCP account](https://console.cloud.google.com/) and generate a credentials file. Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n",
"\n",
"```bash\n",
"export GOOGLE_APPLICATION_CREDENTIALS=\"path/to/your/credentials.json\"\n",
@@ -66,7 +71,7 @@
"\n",
"### Installation\n",
"\n",
- "The LangChain ChatVertexAI integration lives in the `@langchain/google-vertexai` package:\n",
+ "The LangChain `ChatVertexAI` integration lives in the `@langchain/google-vertexai` package:\n",
"\n",
"```{=mdx}\n",
"import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
@@ -78,7 +83,7 @@
" @langchain/google-vertexai\n",
"\n",
"\n",
- "Or if using in a web environment:\n",
+ "Or if using in a web environment like a [Vercel Edge function](https://vercel.com/blog/edge-functions-generally-available):\n",
"\n",
"\n",
" @langchain/google-vertexai-web\n",
@@ -236,260 +241,12 @@
"\n",
"const chain = prompt.pipe(llm);\n",
"await chain.invoke(\n",
- " {\n",
- " input_language: \"English\",\n",
- " output_language: \"German\",\n",
- " input: \"I love programming.\",\n",
- " }\n",
- ")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
- "metadata": {},
- "source": [
- "## Multimodal\n",
- "\n",
- "The Gemini API can process multimodal inputs. The example below demonstrates how to do this:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "5981e230",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- " The image shows a hot dog in a bun. The hot dog is grilled and has a red color. The bun is white and soft.\n"
- ]
- }
- ],
- "source": [
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n",
- "import fs from \"node:fs\";\n",
- "\n",
- "const llmForMultiModal = new ChatVertexAI({\n",
- " model: \"gemini-pro-vision\",\n",
- " temperature: 0.7,\n",
- "});\n",
- "\n",
- "const image = fs.readFileSync(\"../../../../../examples/hotdog.jpg\").toString(\"base64\");\n",
- "const promptForMultiModal = ChatPromptTemplate.fromMessages([\n",
- " [\n",
- " \"human\",\n",
- " [\n",
- " {\n",
- " type: \"text\",\n",
- " text: \"Describe the following image.\",\n",
- " },\n",
- " {\n",
- " type: \"image_url\",\n",
- " image_url: \"data:image/png;base64,{image_base64}\",\n",
- " },\n",
- " ],\n",
- " ],\n",
- "]);\n",
- "\n",
- "const multiModalRes = await promptForMultiModal.pipe(llmForMultiModal).invoke({\n",
- " image_base64: image,\n",
- "});\n",
- "\n",
- "console.log(multiModalRes.content);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "aa6a51dd",
- "metadata": {},
- "source": [
- "## Tool calling\n",
- "\n",
- "`ChatVertexAI` also supports calling the model with a tool:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "bc64485f",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { number2: 81623836, operation: 'multiply', number1: 1628253239 },\n",
- " id: 'a219d75748f445ab8c7ca8b516898e18',\n",
- " type: 'tool_call'\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
- "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n",
- "import { zodToGeminiParameters } from \"@langchain/google-vertexai/utils\";\n",
- "import { z } from \"zod\";\n",
- "// Or, if using the web entrypoint:\n",
- "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\";\n",
- "\n",
- "const calculatorSchema = z.object({\n",
- " operation: z\n",
- " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n",
- " .describe(\"The type of operation to execute\"),\n",
- " number1: z.number().describe(\"The first number to operate on.\"),\n",
- " number2: z.number().describe(\"The second number to operate on.\"),\n",
- "});\n",
- "\n",
- "const geminiCalculatorTool = {\n",
- " functionDeclarations: [\n",
- " {\n",
- " name: \"calculator\",\n",
- " description: \"A simple calculator tool\",\n",
- " parameters: zodToGeminiParameters(calculatorSchema),\n",
- " },\n",
- " ],\n",
- "};\n",
- "\n",
- "const llmWithTool = new ChatVertexAI({\n",
- " temperature: 0.7,\n",
- " model: \"gemini-1.5-flash-001\",\n",
- "}).bindTools([geminiCalculatorTool]);\n",
- "\n",
- "const toolRes = await llmWithTool.invoke(\"What is 1628253239 times 81623836?\");\n",
- "console.dir(toolRes.tool_calls, { depth: null });"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "46ce27ae",
- "metadata": {},
- "source": [
- "### `withStructuredOutput`\n",
- "\n",
- "Alternatively, you can also use the `withStructuredOutput` method:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "012a9afc",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ operation: 'multiply', number1: 1628253239, number2: 81623836 }\n"
- ]
- }
- ],
- "source": [
- "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n",
- "import { z } from \"zod\";\n",
- "// Or, if using the web entrypoint:\n",
- "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\";\n",
- "\n",
- "const calculatorSchemaForWSO = z.object({\n",
- " operation: z\n",
- " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n",
- " .describe(\"The type of operation to execute\"),\n",
- " number1: z.number().describe(\"The first number to operate on.\"),\n",
- " number2: z.number().describe(\"The second number to operate on.\"),\n",
- "});\n",
- "\n",
- "const llmWithStructuredOutput = new ChatVertexAI({\n",
- " temperature: 0.7,\n",
- " model: \"gemini-1.5-flash-001\",\n",
- "}).withStructuredOutput(calculatorSchemaForWSO, {\n",
- " name: \"calculator\"\n",
- "});\n",
- "\n",
- "const wsoRes = await llmWithStructuredOutput.invoke(\"What is 1628253239 times 81623836?\");\n",
- "console.log(wsoRes);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3b306e5b",
- "metadata": {},
- "source": [
- "## VertexAI tools agent\n",
- "\n",
- "The Gemini family of models not only support tool calling, but can also be used in the Tool Calling agent.\n",
- "Here's an example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "0391002b",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "The weather in Paris, France is 28 degrees Celsius. \n",
- "\n"
- ]
- }
- ],
- "source": [
- "import { z } from \"zod\";\n",
- "\n",
- "import { tool } from \"@langchain/core/tools\";\n",
- "import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n",
- "\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n",
- "// Uncomment this if you're running inside a web/edge environment.\n",
- "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\";\n",
- "\n",
- "const llmAgent = new ChatVertexAI({\n",
- " temperature: 0,\n",
- " model: \"gemini-1.5-pro\",\n",
- "});\n",
- "\n",
- "// Prompt template must have \"input\" and \"agent_scratchpad input variables\"\n",
- "const agentPrompt = ChatPromptTemplate.fromMessages([\n",
- " [\"system\", \"You are a helpful assistant\"],\n",
- " [\"placeholder\", \"{chat_history}\"],\n",
- " [\"human\", \"{input}\"],\n",
- " [\"placeholder\", \"{agent_scratchpad}\"],\n",
- "]);\n",
- "\n",
- "// Mocked tool\n",
- "const currentWeatherTool = tool(async () => \"28 °C\", {\n",
- " name: \"get_current_weather\",\n",
- " description: \"Get the current weather in a given location\",\n",
- " schema: z.object({\n",
- " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n",
- " }),\n",
- "});\n",
- "\n",
- "const agent = await createToolCallingAgent({\n",
- " llm: llmAgent,\n",
- " tools: [currentWeatherTool],\n",
- " prompt: agentPrompt,\n",
- "});\n",
- "\n",
- "const agentExecutor = new AgentExecutor({\n",
- " agent,\n",
- " tools: [currentWeatherTool],\n",
- "});\n",
- "\n",
- "const input = \"What's the weather like in Paris?\";\n",
- "const agentRes = await agentExecutor.invoke({ input });\n",
- "\n",
- "console.log(agentRes.output);"
+ " {\n",
+ " input_language: \"English\",\n",
+ " output_language: \"German\",\n",
+ " input: \"I love programming.\",\n",
+ " }\n",
+ ");"
]
},
{
diff --git a/docs/core_docs/docs/integrations/chat/groq.ipynb b/docs/core_docs/docs/integrations/chat/groq.ipynb
index 342a48ce48a8..112cd53fd6b9 100644
--- a/docs/core_docs/docs/integrations/chat/groq.ipynb
+++ b/docs/core_docs/docs/integrations/chat/groq.ipynb
@@ -21,6 +21,8 @@
"source": [
"# ChatGroq\n",
"\n",
+ "[Groq](https://groq.com/) is a company that offers fast AI inference, powered by LPU™ AI inference technology which delivers fast, affordable, and energy efficient AI.\n",
+ "\n",
"This will help you getting started with ChatGroq [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html).\n",
"\n",
"## Overview\n",
@@ -31,13 +33,16 @@
"| [ChatGroq](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html) | [@langchain/groq](https://api.js.langchain.com/modules/langchain_groq.html) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/groq?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/groq?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n",
"\n",
"## Setup\n",
"\n",
- "To access ChatGroq models you'll need to create a ChatGroq account, get an API key, and install the `@langchain/groq` integration package.\n",
+ "To access ChatGroq models you'll need to create a Groq account, get an API key, and install the `@langchain/groq` integration package.\n",
"\n",
"### Credentials\n",
"\n",
@@ -229,320 +234,6 @@
")"
]
},
- {
- "cell_type": "markdown",
- "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
- "metadata": {},
- "source": [
- "## Tool calling\n",
- "\n",
- "Groq chat models support calling multiple functions to get all required data to answer a question.\n",
- "Here's an example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "aa42d55a",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'get_current_weather',\n",
- " args: { location: 'San Francisco', unit: 'fahrenheit' },\n",
- " type: 'tool_call',\n",
- " id: 'call_1mpy'\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
- "import { tool } from \"@langchain/core/tools\";\n",
- "import { ChatGroq } from \"@langchain/groq\";\n",
- "import { z } from \"zod\";\n",
- "\n",
- "// Mocked out function, could be a database/API call in production\n",
- "const getCurrentWeatherTool = tool((input) => {\n",
- " if (input.location.toLowerCase().includes(\"tokyo\")) {\n",
- " return JSON.stringify({ location: input.location, temperature: \"10\", unit: \"celsius\" });\n",
- " } else if (input.location.toLowerCase().includes(\"san francisco\")) {\n",
- " return JSON.stringify({\n",
- " location: input.location,\n",
- " temperature: \"72\",\n",
- " unit: \"fahrenheit\",\n",
- " });\n",
- " } else {\n",
- " return JSON.stringify({ location: input.location, temperature: \"22\", unit: \"celsius\" });\n",
- " }\n",
- "}, {\n",
- " name: \"get_current_weather\",\n",
- " description: \"Get the current weather in a given location\",\n",
- " schema: z.object({\n",
- " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n",
- " unit: z.enum([\"celsius\", \"fahrenheit\"]).optional(),\n",
- " }),\n",
- "})\n",
- "\n",
- "// Bind function to the model as a tool\n",
- "const llmWithTools = new ChatGroq({\n",
- " model: \"mixtral-8x7b-32768\",\n",
- " maxTokens: 128,\n",
- "}).bindTools([getCurrentWeatherTool], {\n",
- " tool_choice: \"auto\",\n",
- "});\n",
- "\n",
- "const resWithTools = await llmWithTools.invoke([\n",
- " [\"human\", \"What's the weather like in San Francisco?\"],\n",
- "]);\n",
- "\n",
- "console.dir(resWithTools.tool_calls, { depth: null });"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "ae6d3948",
- "metadata": {},
- "source": [
- "### `.withStructuredOutput({ ... })`\n",
- "\n",
- "```{=mdx}\n",
- "\n",
- ":::info\n",
- "The `.withStructuredOutput` method is in beta. It is actively being worked on, so the API may change.\n",
- ":::\n",
- "\n",
- "```\n",
- "\n",
- "You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatGroq` into returning a structured output.\n",
- "\n",
- "The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)).\n",
- "\n",
- "Using the method is simple. Just define your LLM and call `.withStructuredOutput({ ... })` on it, passing the desired schema.\n",
- "\n",
- "Here is an example using a Zod schema and the `functionCalling` mode (default mode):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "1ad6c77d",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ operation: 'add', number1: 2, number2: 2 }\n"
- ]
- }
- ],
- "source": [
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { ChatGroq } from \"@langchain/groq\";\n",
- "import { z } from \"zod\";\n",
- "\n",
- "const calculatorSchema = z.object({\n",
- " operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n",
- " number1: z.number(),\n",
- " number2: z.number(),\n",
- "});\n",
- "\n",
- "const llmForWSO = new ChatGroq({\n",
- " temperature: 0,\n",
- " model: \"mixtral-8x7b-32768\",\n",
- "});\n",
- "const modelWithStructuredOutput = llmForWSO.withStructuredOutput(calculatorSchema);\n",
- "\n",
- "const promptWSO = ChatPromptTemplate.fromMessages([\n",
- " [\"system\", \"You are VERY bad at math and must always use a calculator.\"],\n",
- " [\"human\", \"Please help me!! What is 2 + 2?\"],\n",
- "]);\n",
- "const chainWSO = promptWSO.pipe(modelWithStructuredOutput);\n",
- "const resultWSO = await chainWSO.invoke({});\n",
- "console.log(resultWSO);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "24757550",
- "metadata": {},
- "source": [
- "You can also specify 'includeRaw' to return the parsed and raw output in the result."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "1d13ed6f",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " raw: AIMessage {\n",
- " lc_serializable: true,\n",
- " lc_kwargs: {\n",
- " content: '',\n",
- " additional_kwargs: {\n",
- " tool_calls: [\n",
- " {\n",
- " id: 'call_7z1y',\n",
- " type: 'function',\n",
- " function: {\n",
- " name: 'calculator',\n",
- " arguments: '{\"number1\":2,\"number2\":2,\"operation\":\"add\"}'\n",
- " }\n",
- " }\n",
- " ]\n",
- " },\n",
- " tool_calls: [\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { number1: 2, number2: 2, operation: 'add' },\n",
- " type: 'tool_call',\n",
- " id: 'call_7z1y'\n",
- " }\n",
- " ],\n",
- " invalid_tool_calls: [],\n",
- " response_metadata: {}\n",
- " },\n",
- " lc_namespace: [ 'langchain_core', 'messages' ],\n",
- " content: '',\n",
- " name: undefined,\n",
- " additional_kwargs: {\n",
- " tool_calls: [\n",
- " {\n",
- " id: 'call_7z1y',\n",
- " type: 'function',\n",
- " function: {\n",
- " name: 'calculator',\n",
- " arguments: '{\"number1\":2,\"number2\":2,\"operation\":\"add\"}'\n",
- " }\n",
- " }\n",
- " ]\n",
- " },\n",
- " response_metadata: {\n",
- " tokenUsage: { completionTokens: 111, promptTokens: 1257, totalTokens: 1368 },\n",
- " finish_reason: 'tool_calls'\n",
- " },\n",
- " id: undefined,\n",
- " tool_calls: [\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { number1: 2, number2: 2, operation: 'add' },\n",
- " type: 'tool_call',\n",
- " id: 'call_7z1y'\n",
- " }\n",
- " ],\n",
- " invalid_tool_calls: [],\n",
- " usage_metadata: undefined\n",
- " },\n",
- " parsed: { operation: 'add', number1: 2, number2: 2 }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "const includeRawModel = llmForWSO.withStructuredOutput(calculatorSchema, {\n",
- " name: \"calculator\",\n",
- " includeRaw: true,\n",
- "});\n",
- "\n",
- "const includeRawChain = promptWSO.pipe(includeRawModel);\n",
- "const includeRawResult = await includeRawChain.invoke(\"\");\n",
- "console.dir(includeRawResult, { depth: null });"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7944c7c3",
- "metadata": {},
- "source": [
- "## Streaming\n",
- "\n",
- "Groq's API also supports streaming token responses. The example below demonstrates how to use this feature."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "4ae5fb48",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "stream: \n",
- "stream: Hello\n",
- "stream: Hello!\n",
- "stream: Hello! I\n",
- "stream: Hello! I'\n",
- "stream: Hello! I'm\n",
- "stream: Hello! I'm here\n",
- "stream: Hello! I'm here to\n",
- "stream: Hello! I'm here to help\n",
- "stream: Hello! I'm here to help you\n",
- "stream: Hello! I'm here to help you.\n",
- "stream: Hello! I'm here to help you. Is\n",
- "stream: Hello! I'm here to help you. Is there\n",
- "stream: Hello! I'm here to help you. Is there something\n",
- "stream: Hello! I'm here to help you. Is there something you\n",
- "stream: Hello! I'm here to help you. Is there something you would\n",
- "stream: Hello! I'm here to help you. Is there something you would like\n",
- "stream: Hello! I'm here to help you. Is there something you would like to\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with?\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me anything\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me anything.\n",
- "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me anything.\n"
- ]
- }
- ],
- "source": [
- "import { ChatGroq } from \"@langchain/groq\";\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
- "\n",
- "const llmForStreaming = new ChatGroq({\n",
- " apiKey: process.env.GROQ_API_KEY,\n",
- "});\n",
- "const promptForStreaming = ChatPromptTemplate.fromMessages([\n",
- " [\"system\", \"You are a helpful assistant\"],\n",
- " [\"human\", \"{input}\"],\n",
- "]);\n",
- "const outputParserForStreaming = new StringOutputParser();\n",
- "const chainForStreaming = promptForStreaming.pipe(llmForStreaming).pipe(outputParserForStreaming);\n",
- "const streamRes = await chainForStreaming.stream({\n",
- " input: \"Hello\",\n",
- "});\n",
- "let streamedRes = \"\";\n",
- "for await (const item of streamRes) {\n",
- " streamedRes += item;\n",
- " console.log(\"stream:\", streamedRes);\n",
- "}"
- ]
- },
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
diff --git a/docs/core_docs/docs/integrations/chat/llama_cpp.mdx b/docs/core_docs/docs/integrations/chat/llama_cpp.mdx
index 58854e1418e9..ecf8e197014b 100644
--- a/docs/core_docs/docs/integrations/chat/llama_cpp.mdx
+++ b/docs/core_docs/docs/integrations/chat/llama_cpp.mdx
@@ -76,3 +76,8 @@ Using the `invoke` method, we can also achieve stream generation, and use `signa
import StreamInvokeExample from "@examples/models/chat/integration_llama_cpp_stream_invoke.ts";
{StreamInvokeExample}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/minimax.mdx b/docs/core_docs/docs/integrations/chat/minimax.mdx
index 302d49028e64..df70ea1ee0af 100644
--- a/docs/core_docs/docs/integrations/chat/minimax.mdx
+++ b/docs/core_docs/docs/integrations/chat/minimax.mdx
@@ -73,3 +73,8 @@ This feature supports calling tools like a search engine to get additional data
import MinimaxPlugins from "@examples/models/chat/minimax_plugins.ts";
{MinimaxPlugins}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/mistral.ipynb b/docs/core_docs/docs/integrations/chat/mistral.ipynb
index f3f61fec8bff..9e58985f21e1 100644
--- a/docs/core_docs/docs/integrations/chat/mistral.ipynb
+++ b/docs/core_docs/docs/integrations/chat/mistral.ipynb
@@ -21,6 +21,8 @@
"source": [
"# ChatMistralAI\n",
"\n",
+ "[Mistral AI](https://mistral.ai/) is a platform that offers hosting for their powerful [open source models](https://docs.mistral.ai/getting-started/models/).\n",
+ "\n",
"This will help you getting started with ChatMistralAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatMistralAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html).\n",
"\n",
"## Overview\n",
@@ -31,6 +33,9 @@
"| [ChatMistralAI](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html) | [@langchain/mistralai](https://api.js.langchain.com/modules/langchain_mistralai.html) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | \n",
@@ -91,9 +96,8 @@
"import { ChatMistralAI } from \"@langchain/mistralai\" \n",
"\n",
"const llm = new ChatMistralAI({\n",
- " model: \"mistral-small\",\n",
+ " model: \"mistral-large-latest\",\n",
" temperature: 0,\n",
- " maxTokens: undefined,\n",
" maxRetries: 2,\n",
" // other params...\n",
"})"
@@ -126,22 +130,22 @@
"output_type": "stream",
"text": [
"AIMessage {\n",
- " \"content\": \"Sure, I'd be happy to help you translate that sentence into French! The English sentence \\\"I love programming\\\" translates to \\\"J'aime programmer\\\" in French. Let me know if you have any other questions or need further assistance!\",\n",
+ " \"content\": \"J'adore la programmation.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
- " \"completionTokens\": 52,\n",
- " \"promptTokens\": 32,\n",
- " \"totalTokens\": 84\n",
+ " \"completionTokens\": 9,\n",
+ " \"promptTokens\": 27,\n",
+ " \"totalTokens\": 36\n",
" },\n",
" \"finish_reason\": \"stop\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
- " \"input_tokens\": 32,\n",
- " \"output_tokens\": 52,\n",
- " \"total_tokens\": 84\n",
+ " \"input_tokens\": 27,\n",
+ " \"output_tokens\": 9,\n",
+ " \"total_tokens\": 36\n",
" }\n",
"}\n"
]
@@ -168,7 +172,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Sure, I'd be happy to help you translate that sentence into French! The English sentence \"I love programming\" translates to \"J'aime programmer\" in French. Let me know if you have any other questions or need further assistance!\n"
+ "J'adore la programmation.\n"
]
}
],
@@ -197,22 +201,22 @@
"output_type": "stream",
"text": [
"AIMessage {\n",
- " \"content\": \"Ich liebe Programmierung. (German translation)\",\n",
+ " \"content\": \"Ich liebe Programmieren.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
- " \"completionTokens\": 12,\n",
- " \"promptTokens\": 26,\n",
- " \"totalTokens\": 38\n",
+ " \"completionTokens\": 7,\n",
+ " \"promptTokens\": 21,\n",
+ " \"totalTokens\": 28\n",
" },\n",
" \"finish_reason\": \"stop\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
- " \"input_tokens\": 26,\n",
- " \"output_tokens\": 12,\n",
- " \"total_tokens\": 38\n",
+ " \"input_tokens\": 21,\n",
+ " \"output_tokens\": 7,\n",
+ " \"total_tokens\": 28\n",
" }\n",
"}\n"
]
@@ -248,13 +252,14 @@
"source": [
"## Tool calling\n",
"\n",
- "Mistral's API now supports tool calling and JSON mode!\n",
- "The examples below demonstrates how to use them, along with how to use the `withStructuredOutput` method to easily compose structured output LLM calls."
+ "Mistral's API supports [tool calling](/docs/concepts#functiontool-calling) for a subset of their models. You can see which models support tool calling [on this page](https://docs.mistral.ai/capabilities/function_calling/).\n",
+ "\n",
+ "The examples below demonstrates how to use it:"
]
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": 5,
"id": "98d9034c",
"metadata": {},
"outputs": [
@@ -267,7 +272,7 @@
" name: 'calculator',\n",
" args: { operation: 'add', number1: 2, number2: 2 },\n",
" type: 'tool_call',\n",
- " id: 'Tn8X3UCSP'\n",
+ " id: 'DD9diCL1W'\n",
" }\n",
"]\n"
]
@@ -298,9 +303,7 @@
"// Bind the tool to the model\n",
"const modelWithTool = new ChatMistralAI({\n",
" model: \"mistral-large-latest\",\n",
- "}).bind({\n",
- " tools: [calculatorTool],\n",
- "});\n",
+ "}).bindTools([calculatorTool]);\n",
"\n",
"\n",
"const calcToolPrompt = ChatPromptTemplate.fromMessages([\n",
@@ -320,315 +323,6 @@
"console.log(calcToolRes.tool_calls);"
]
},
- {
- "cell_type": "markdown",
- "id": "e6ff9f76",
- "metadata": {},
- "source": [
- "### `.withStructuredOutput({ ... })`\n",
- "\n",
- "Using the `.withStructuredOutput` method, you can easily make the LLM return structured output, given only a Zod or JSON schema:\n",
- "\n",
- "```{=mdx}\n",
- "\n",
- ":::note\n",
- "The Mistral tool calling API requires descriptions for each tool field. If descriptions are not supplied, the API will error.\n",
- ":::\n",
- "\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "a8638d82",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ operation: 'add', number1: 2, number2: 2 }\n"
- ]
- }
- ],
- "source": [
- "import { ChatMistralAI } from \"@langchain/mistralai\";\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { z } from \"zod\";\n",
- "\n",
- "const calculatorSchemaForWSO = z\n",
- " .object({\n",
- " operation: z\n",
- " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n",
- " .describe(\"The type of operation to execute.\"),\n",
- " number1: z.number().describe(\"The first number to operate on.\"),\n",
- " number2: z.number().describe(\"The second number to operate on.\"),\n",
- " })\n",
- " .describe(\"A simple calculator tool\");\n",
- "\n",
- "const llmForWSO = new ChatMistralAI({\n",
- " model: \"mistral-large-latest\",\n",
- "})\n",
- "\n",
- "// Pass the schema and tool name to the withStructuredOutput method\n",
- "const modelWithStructuredOutput = llmForWSO.withStructuredOutput(calculatorSchemaForWSO, {\n",
- " name: \"calculator\",\n",
- "});\n",
- "\n",
- "const promptForWSO = ChatPromptTemplate.fromMessages([\n",
- " [\n",
- " \"system\",\n",
- " \"You are a helpful assistant who always needs to use a calculator.\",\n",
- " ],\n",
- " [\"human\", \"{input}\"],\n",
- "]);\n",
- "\n",
- "// Chain your prompt and model together\n",
- "const chainWSO = promptForWSO.pipe(modelWithStructuredOutput);\n",
- "\n",
- "const responseWSO = await chainWSO.invoke({\n",
- " input: \"What is 2 + 2?\",\n",
- "});\n",
- "console.log(responseWSO);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "38d8a048",
- "metadata": {},
- "source": [
- "You can supply a \"name\" field to give the LLM additional context around what you are trying to generate. You can also pass 'includeRaw' to get the raw message back from the model too."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "9786b41a",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " raw: AIMessage {\n",
- " lc_serializable: true,\n",
- " lc_kwargs: {\n",
- " content: '',\n",
- " tool_calls: [\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { operation: 'add', number1: 2, number2: 2 },\n",
- " type: 'tool_call',\n",
- " id: 'w48T6Nc3d'\n",
- " }\n",
- " ],\n",
- " invalid_tool_calls: [],\n",
- " additional_kwargs: {\n",
- " tool_calls: [\n",
- " {\n",
- " id: 'w48T6Nc3d',\n",
- " function: {\n",
- " name: 'calculator',\n",
- " arguments: '{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}'\n",
- " },\n",
- " type: 'function'\n",
- " }\n",
- " ]\n",
- " },\n",
- " usage_metadata: { input_tokens: 205, output_tokens: 34, total_tokens: 239 },\n",
- " response_metadata: {}\n",
- " },\n",
- " lc_namespace: [ 'langchain_core', 'messages' ],\n",
- " content: '',\n",
- " name: undefined,\n",
- " additional_kwargs: {\n",
- " tool_calls: [\n",
- " {\n",
- " id: 'w48T6Nc3d',\n",
- " function: {\n",
- " name: 'calculator',\n",
- " arguments: '{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}'\n",
- " },\n",
- " type: 'function'\n",
- " }\n",
- " ]\n",
- " },\n",
- " response_metadata: {\n",
- " tokenUsage: { completionTokens: 34, promptTokens: 205, totalTokens: 239 },\n",
- " finish_reason: 'tool_calls'\n",
- " },\n",
- " id: undefined,\n",
- " tool_calls: [\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { operation: 'add', number1: 2, number2: 2 },\n",
- " type: 'tool_call',\n",
- " id: 'w48T6Nc3d'\n",
- " }\n",
- " ],\n",
- " invalid_tool_calls: [],\n",
- " usage_metadata: { input_tokens: 205, output_tokens: 34, total_tokens: 239 }\n",
- " },\n",
- " parsed: { operation: 'add', number1: 2, number2: 2 }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "const includeRawModel = llmForWSO.withStructuredOutput(calculatorSchemaForWSO, {\n",
- " name: \"calculator\",\n",
- " includeRaw: true,\n",
- "});\n",
- "const includeRawChain = promptForWSO.pipe(includeRawModel);\n",
- "\n",
- "const includeRawResponse = await includeRawChain.invoke({\n",
- " input: \"What is 2 + 2?\",\n",
- "});\n",
- "console.dir(includeRawResponse, { depth: null });"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "6b7b374f",
- "metadata": {},
- "source": [
- "### Using JSON schema:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "9f1dc9bd",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ operation: 'add', number1: 2, number2: 2 }\n"
- ]
- }
- ],
- "source": [
- "import { ChatMistralAI } from \"@langchain/mistralai\";\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "\n",
- "const calculatorJsonSchema = {\n",
- " type: \"object\",\n",
- " properties: {\n",
- " operation: {\n",
- " type: \"string\",\n",
- " enum: [\"add\", \"subtract\", \"multiply\", \"divide\"],\n",
- " description: \"The type of operation to execute.\",\n",
- " },\n",
- " number1: { type: \"number\", description: \"The first number to operate on.\" },\n",
- " number2: {\n",
- " type: \"number\",\n",
- " description: \"The second number to operate on.\",\n",
- " },\n",
- " },\n",
- " required: [\"operation\", \"number1\", \"number2\"],\n",
- " description: \"A simple calculator tool\",\n",
- "};\n",
- "\n",
- "const llmForJsonSchema = new ChatMistralAI({\n",
- " model: \"mistral-large-latest\",\n",
- "});\n",
- "\n",
- "// Pass the schema and tool name to the withStructuredOutput method\n",
- "const modelWithJsonSchemaTool = llmForJsonSchema.withStructuredOutput(calculatorJsonSchema);\n",
- "\n",
- "const promptForJsonSchema = ChatPromptTemplate.fromMessages([\n",
- " [\n",
- " \"system\",\n",
- " \"You are a helpful assistant who always needs to use a calculator.\",\n",
- " ],\n",
- " [\"human\", \"{input}\"],\n",
- "]);\n",
- "\n",
- "// Chain your prompt and model together\n",
- "const chainWithJsonSchema = promptForJsonSchema.pipe(modelWithJsonSchemaTool);\n",
- "\n",
- "const responseFromJsonSchema = await chainWithJsonSchema.invoke({\n",
- " input: \"What is 2 + 2?\",\n",
- "});\n",
- "console.log(responseFromJsonSchema);\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3c8bc1d4",
- "metadata": {},
- "source": [
- "### Tool calling agent\n",
- "\n",
- "The larger Mistral models not only support tool calling, but can also be used in the Tool Calling agent.\n",
- "Here's an example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "76bd0061",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "It's 28 °C in Paris.\n"
- ]
- }
- ],
- "source": [
- "import { z } from \"zod\";\n",
- "import { ChatMistralAI } from \"@langchain/mistralai\";\n",
- "import { tool } from \"@langchain/core/tools\";\n",
- "import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n",
- "\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "\n",
- "const llmForAgent = new ChatMistralAI({\n",
- " temperature: 0,\n",
- " model: \"mistral-large-latest\",\n",
- "});\n",
- "\n",
- "// Prompt template must have \"input\" and \"agent_scratchpad input variables\"\n",
- "const agentPrompt = ChatPromptTemplate.fromMessages([\n",
- " [\"system\", \"You are a helpful assistant\"],\n",
- " [\"placeholder\", \"{chat_history}\"],\n",
- " [\"human\", \"{input}\"],\n",
- " [\"placeholder\", \"{agent_scratchpad}\"],\n",
- "]);\n",
- "\n",
- "// Mocked tool\n",
- "const currentWeatherToolForAgent = tool(async () => \"28 °C\", {\n",
- " name: \"get_current_weather\",\n",
- " description: \"Get the current weather in a given location\",\n",
- " schema: z.object({\n",
- " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n",
- " }),\n",
- "});\n",
- "\n",
- "const agent = createToolCallingAgent({\n",
- " llm: llmForAgent,\n",
- " tools: [currentWeatherToolForAgent],\n",
- " prompt: agentPrompt,\n",
- "});\n",
- "\n",
- "const agentExecutor = new AgentExecutor({\n",
- " agent,\n",
- " tools: [currentWeatherToolForAgent],\n",
- "});\n",
- "\n",
- "const agentInput = \"What's the weather like in Paris?\";\n",
- "const agentRes = await agentExecutor.invoke({ input: agentInput });\n",
- "\n",
- "console.log(agentRes.output);\n"
- ]
- },
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
diff --git a/docs/core_docs/docs/integrations/chat/moonshot.mdx b/docs/core_docs/docs/integrations/chat/moonshot.mdx
index 2264f04016eb..6f3479335494 100644
--- a/docs/core_docs/docs/integrations/chat/moonshot.mdx
+++ b/docs/core_docs/docs/integrations/chat/moonshot.mdx
@@ -33,3 +33,8 @@ Here's an example:
import Moonshot from "@examples/models/chat/integration_moonshot.ts";
{Moonshot}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx b/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx
index 5fd40b9c9c03..3ce1be4ff745 100644
--- a/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx
+++ b/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx
@@ -27,3 +27,8 @@ console.log({ res });
}
*/
```
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/ollama.ipynb b/docs/core_docs/docs/integrations/chat/ollama.ipynb
index 1ed84b3130f8..6c23a5465cb1 100644
--- a/docs/core_docs/docs/integrations/chat/ollama.ipynb
+++ b/docs/core_docs/docs/integrations/chat/ollama.ipynb
@@ -21,30 +21,34 @@
"source": [
"# ChatOllama\n",
"\n",
- "This will help you getting started with `ChatOllama` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatOllama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html).\n",
+ "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 3.1, locally.\n",
+ "\n",
+ "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage.\n",
+ "\n",
+ "This guide will help you getting started with `ChatOllama` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatOllama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
- "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n",
+ "Ollama allows you to use a wide range of models with different capabilities. Some of the fields in the details table below only apply to a subset of models that Ollama offers.\n",
"\n",
- "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage.\n",
- "\n",
- "This example goes over how to use LangChain to interact with an Ollama-run Llama 2 7b instance as a chat model.\n",
- "For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n",
+ "For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.com/search) and search by tag.\n",
"\n",
"| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/v0.2/docs/integrations/chat/ollama) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatOllama](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html) | [@langchain/ollama](https://api.js.langchain.com/modules/langchain_ollama.html) | ✅ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n",
"\n",
"## Setup\n",
"\n",
- "Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance. Then, download the `@langchain/ollama` package.\n",
+ "Follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance. Then, download the `@langchain/ollama` package.\n",
"\n",
"### Credentials\n",
"\n",
@@ -252,7 +256,7 @@
"source": [
"## Tools\n",
"\n",
- "Ollama now offers support for native tool calling. The example below demonstrates how you can invoke a tool from an Ollama model."
+ "Ollama now offers support for native tool calling [for a subset of their available models](https://ollama.com/search?c=tools). The example below demonstrates how you can invoke a tool from an Ollama model."
]
},
{
@@ -335,7 +339,7 @@
"source": [
"### `.withStructuredOutput`\n",
"\n",
- "Since `ChatOllama` supports the `.bindTools()` method, you can also call `.withStructuredOutput()` to get a structured output from the tool."
+ "For [models that support tool calling](https://ollama.com/search?c=tools), you can also call `.withStructuredOutput()` to get a structured output from the tool."
]
},
{
@@ -384,7 +388,7 @@
"source": [
"### JSON mode\n",
"\n",
- "Ollama also supports a JSON mode that coerces model outputs to only return JSON. Here's an example of how this can be useful for extraction:"
+ "Ollama also supports a JSON mode for all chat models that coerces model outputs to only return JSON. Here's an example of how this can be useful for extraction:"
]
},
{
@@ -459,7 +463,7 @@
"## Multimodal models\n",
"\n",
"Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up.\n",
- "You can pass images as part of a message's `content` field to multimodal-capable models like this:"
+ "You can pass images as part of a message's `content` field to [multimodal-capable](/docs/how_to/multimodal_inputs/) models like this:"
]
},
{
diff --git a/docs/core_docs/docs/integrations/chat/ollama_functions.mdx b/docs/core_docs/docs/integrations/chat/ollama_functions.mdx
index 8dd6ad935f22..3c009b3539be 100644
--- a/docs/core_docs/docs/integrations/chat/ollama_functions.mdx
+++ b/docs/core_docs/docs/integrations/chat/ollama_functions.mdx
@@ -68,3 +68,8 @@ Because different models have different strengths, it may be helpful to pass in
import OllamaFunctionsCustomPrompt from "@examples/models/chat/ollama_functions/custom_prompt.ts";
{OllamaFunctionsCustomPrompt}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/openai.ipynb b/docs/core_docs/docs/integrations/chat/openai.ipynb
index d63d918e2da9..8dbf079948d8 100644
--- a/docs/core_docs/docs/integrations/chat/openai.ipynb
+++ b/docs/core_docs/docs/integrations/chat/openai.ipynb
@@ -21,7 +21,9 @@
"source": [
"# ChatOpenAI\n",
"\n",
- "This will help you getting started with ChatOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html).\n",
+ "[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an artificial intelligence (AI) research laboratory.\n",
+ "\n",
+ "This guide will help you getting started with ChatOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
@@ -31,17 +33,20 @@
"| [ChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n",
"\n",
"## Setup\n",
"\n",
- "To access ChatOpenAI models you'll need to create a ChatOpenAI account, get an API key, and install the `@langchain/openai` integration package.\n",
+ "To access OpenAI chat models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
- "Head to [OpenAI's website](https://platform.openai.com/) to sign up to ChatOpenAI and generate an API key. Once you've done this set the `OPENAI_API_KEY` environment variable:\n",
+ "Head to [OpenAI's website](https://platform.openai.com/) to sign up for OpenAI and generate an API key. Once you've done this set the `OPENAI_API_KEY` environment variable:\n",
"\n",
"```bash\n",
"export OPENAI_API_KEY=\"your-api-key\"\n",
@@ -56,7 +61,7 @@
"\n",
"### Installation\n",
"\n",
- "The LangChain ChatOpenAI integration lives in the `@langchain/openai` package:\n",
+ "The LangChain `ChatOpenAI` integration lives in the `@langchain/openai` package:\n",
"\n",
"```{=mdx}\n",
"\n",
@@ -94,9 +99,6 @@
"const llm = new ChatOpenAI({\n",
" model: \"gpt-4o\",\n",
" temperature: 0,\n",
- " maxTokens: undefined,\n",
- " timeout: undefined,\n",
- " maxRetries: 2,\n",
" // other params...\n",
"})"
]
@@ -241,415 +243,88 @@
},
{
"cell_type": "markdown",
- "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
+ "id": "06ffc86b",
"metadata": {},
"source": [
- "## Multimodal messages\n",
- "\n",
- "```{=mdx}\n",
- "\n",
- ":::info\n",
- "This feature is currently in preview. The message schema may change in future releases.\n",
- ":::\n",
- "\n",
- "```\n",
+ "## Custom URLs\n",
"\n",
- "OpenAI supports interleaving images with text in input messages with their `gpt-4-vision-preview`. Here's an example of how this looks:"
+ "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:"
]
},
{
"cell_type": "code",
- "execution_count": 1,
- "id": "fd55c000",
+ "execution_count": null,
+ "id": "19a092b9",
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "AIMessage {\n",
- " \"id\": \"chatcmpl-9rB59AKTPDrSHuTv0y7BNUcM0QDV2\",\n",
- " \"content\": \"The image shows a classic hot dog, consisting of a grilled or steamed sausage served in the slit of a partially sliced bun. The sausage appears to have grill marks, indicating it may have been cooked on a grill. This is a typical and popular snack or fast food item often enjoyed at sporting events, barbecues, and fairs.\",\n",
- " \"additional_kwargs\": {},\n",
- " \"response_metadata\": {\n",
- " \"tokenUsage\": {\n",
- " \"completionTokens\": 69,\n",
- " \"promptTokens\": 438,\n",
- " \"totalTokens\": 507\n",
- " },\n",
- " \"finish_reason\": \"stop\"\n",
- " },\n",
- " \"tool_calls\": [],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 438,\n",
- " \"output_tokens\": 69,\n",
- " \"total_tokens\": 507\n",
- " }\n",
- "}\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "import * as fs from \"node:fs/promises\";\n",
- "\n",
"import { ChatOpenAI } from \"@langchain/openai\";\n",
- "import { HumanMessage } from \"@langchain/core/messages\";\n",
"\n",
- "const imageData2 = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n",
- "const llm2 = new ChatOpenAI({\n",
- " model: \"gpt-4-vision-preview\",\n",
- " maxTokens: 1024,\n",
- " apiKey: process.env.OPENAI_API_KEY,\n",
- "});\n",
- "const message2 = new HumanMessage({\n",
- " content: [\n",
- " {\n",
- " type: \"text\",\n",
- " text: \"What's in this image?\",\n",
- " },\n",
- " {\n",
- " type: \"image_url\",\n",
- " image_url: {\n",
- " url: `data:image/jpeg;base64,${imageData2.toString(\"base64\")}`,\n",
- " },\n",
- " },\n",
- " ],\n",
+ "const llmWithCustomURL = new ChatOpenAI({\n",
+ " temperature: 0.9,\n",
+ " configuration: {\n",
+ " baseURL: \"https://your_custom_url.com\",\n",
+ " },\n",
"});\n",
"\n",
- "const res2 = await llm2.invoke([message2]);\n",
- "console.log(res2);\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "eafbba15",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "AIMessage {\n",
- " \"id\": \"chatcmpl-9rB5EWz5AyOHg6UiFkt4HC8H4UZJu\",\n",
- " \"content\": \"The image contains text that reads \\\"LangChain\\\". Additionally, there is an illustration of a parrot on the left side and two interlinked rings on the right.\",\n",
- " \"additional_kwargs\": {},\n",
- " \"response_metadata\": {\n",
- " \"tokenUsage\": {\n",
- " \"completionTokens\": 33,\n",
- " \"promptTokens\": 778,\n",
- " \"totalTokens\": 811\n",
- " },\n",
- " \"finish_reason\": \"stop\"\n",
- " },\n",
- " \"tool_calls\": [],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 778,\n",
- " \"output_tokens\": 33,\n",
- " \"total_tokens\": 811\n",
- " }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "const hostedImageMessage3 = new HumanMessage({\n",
- " content: [\n",
- " {\n",
- " type: \"text\",\n",
- " text: \"What does this image say?\",\n",
- " },\n",
- " {\n",
- " type: \"image_url\",\n",
- " image_url:\n",
- " \"https://www.freecodecamp.org/news/content/images/2023/05/Screenshot-2023-05-29-at-5.40.38-PM.png\",\n",
- " },\n",
- " ],\n",
- "});\n",
- "const res3 = await llm2.invoke([hostedImageMessage3]);\n",
- "console.log(res3);"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "a3832fc3",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "AIMessage {\n",
- " \"id\": \"chatcmpl-9rB5IUbzvMo5nsOGYW3jvrQjaCiCg\",\n",
- " \"content\": \"The image shows a user interface of a digital service or platform called \\\"WebLangChain\\\" which appears to be powered by \\\"Tailify.\\\" There is a prompt that encourages users to \\\"Ask me anything about anything!\\\" Alongside this, there is a text input field labeled \\\"Ask anything...\\\" which also features some example questions or search queries such as \\\"what is langchain?\\\", \\\"history of mesopotamia\\\", \\\"how to build a discord bot\\\", \\\"leonardo dicaprio girlfriend\\\", \\\"fun gift ideas for software engineers\\\", \\\"how does a prism separate light\\\", and \\\"what bear is best\\\". The overall design is clean, with a dark background and a send button represented by a blue icon with a paper airplane, which typically symbolizes sending a message or submitting a query.\",\n",
- " \"additional_kwargs\": {},\n",
- " \"response_metadata\": {\n",
- " \"tokenUsage\": {\n",
- " \"completionTokens\": 158,\n",
- " \"promptTokens\": 101,\n",
- " \"totalTokens\": 259\n",
- " },\n",
- " \"finish_reason\": \"stop\"\n",
- " },\n",
- " \"tool_calls\": [],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 101,\n",
- " \"output_tokens\": 158,\n",
- " \"total_tokens\": 259\n",
- " }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "const lowDetailImage4 = new HumanMessage({\n",
- " content: [\n",
- " {\n",
- " type: \"text\",\n",
- " text: \"Summarize the contents of this image.\",\n",
- " },\n",
- " {\n",
- " type: \"image_url\",\n",
- " image_url: {\n",
- " url: \"https://blog.langchain.dev/content/images/size/w1248/format/webp/2023/10/Screenshot-2023-10-03-at-4.55.29-PM.png\",\n",
- " detail: \"low\",\n",
- " },\n",
- " },\n",
- " ],\n",
- "});\n",
- "const res4 = await llm2.invoke([lowDetailImage4]);\n",
- "console.log(res4);"
+ "await llmWithCustomURL.invoke(\"Hi there!\");"
]
},
{
"cell_type": "markdown",
- "id": "1a39ecb3",
+ "id": "013b6300",
"metadata": {},
"source": [
- "## Tool calling\n",
+ "You can also pass other `ClientOptions` parameters accepted by the official SDK here.\n",
"\n",
- "OpenAI chat models support calling multiple functions to get all required data to answer a question.\n",
- "Here's an example how a conversation turn with this functionality might look:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "c65f489f",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'get_current_weather',\n",
- " args: { location: 'San Francisco', unit: 'celsius' },\n",
- " type: 'tool_call',\n",
- " id: 'call_2ytmjITA18j3kLOzzjF5QSC4'\n",
- " },\n",
- " {\n",
- " name: 'get_current_weather',\n",
- " args: { location: 'Tokyo', unit: 'celsius' },\n",
- " type: 'tool_call',\n",
- " id: 'call_3sU2dCNZ8e8A8wrYlYa7Xq0G'\n",
- " },\n",
- " {\n",
- " name: 'get_current_weather',\n",
- " args: { location: 'Paris', unit: 'celsius' },\n",
- " type: 'tool_call',\n",
- " id: 'call_Crmc0QG4x1VHRUyiwPsqzmQS'\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
+ "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/chat/azure).\n",
"\n",
- "// Bind function to the model as a tool\n",
- "const llm5 = new ChatOpenAI({\n",
- " model: \"gpt-3.5-turbo-1106\",\n",
- " maxTokens: 128,\n",
- "}).bind({\n",
- " tools: [\n",
- " {\n",
- " type: \"function\",\n",
- " function: {\n",
- " name: \"get_current_weather\",\n",
- " description: \"Get the current weather in a given location\",\n",
- " parameters: {\n",
- " type: \"object\",\n",
- " properties: {\n",
- " location: {\n",
- " type: \"string\",\n",
- " description: \"The city and state, e.g. San Francisco, CA\",\n",
- " },\n",
- " unit: { type: \"string\", enum: [\"celsius\", \"fahrenheit\"] },\n",
- " },\n",
- " required: [\"location\"],\n",
- " },\n",
- " },\n",
- " },\n",
- " ],\n",
- " tool_choice: \"auto\",\n",
- "});\n",
+ "## Calling fine-tuned models\n",
+ "\n",
+ "You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter.\n",
"\n",
- "// Ask initial question that requires multiple tool calls\n",
- "const res5 = await llm5.invoke([\n",
- " [\"human\", \"What's the weather like in San Francisco, Tokyo, and Paris?\"],\n",
- "]);\n",
- "console.log(res5.tool_calls);"
+ "This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example:"
]
},
{
"cell_type": "code",
- "execution_count": 5,
- "id": "c0d3a6a1",
+ "execution_count": null,
+ "id": "7448f6a9",
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "AIMessage {\n",
- " \"id\": \"chatcmpl-9rB5Sc3ERHpRymmAAsGS67zczVhAl\",\n",
- " \"content\": \"The current weather in:\\n- San Francisco is 72°F\\n- Tokyo is 10°C\\n- Paris is 22°C\",\n",
- " \"additional_kwargs\": {},\n",
- " \"response_metadata\": {\n",
- " \"tokenUsage\": {\n",
- " \"completionTokens\": 27,\n",
- " \"promptTokens\": 236,\n",
- " \"totalTokens\": 263\n",
- " },\n",
- " \"finish_reason\": \"stop\",\n",
- " \"system_fingerprint\": \"fp_adbef9f124\"\n",
- " },\n",
- " \"tool_calls\": [],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 236,\n",
- " \"output_tokens\": 27,\n",
- " \"total_tokens\": 263\n",
- " }\n",
- "}\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "import { ToolMessage } from \"@langchain/core/messages\";\n",
- "\n",
- "// Mocked out function, could be a database/API call in production\n",
- "function getCurrentWeather(location: string, _unit?: string) {\n",
- " if (location.toLowerCase().includes(\"tokyo\")) {\n",
- " return JSON.stringify({ location, temperature: \"10\", unit: \"celsius\" });\n",
- " } else if (location.toLowerCase().includes(\"san francisco\")) {\n",
- " return JSON.stringify({\n",
- " location,\n",
- " temperature: \"72\",\n",
- " unit: \"fahrenheit\",\n",
- " });\n",
- " } else {\n",
- " return JSON.stringify({ location, temperature: \"22\", unit: \"celsius\" });\n",
- " }\n",
- "}\n",
+ "import { ChatOpenAI } from \"@langchain/openai\";\n",
"\n",
- "// Format the results from calling the tool calls back to OpenAI as ToolMessages\n",
- "const toolMessages5 = res5.additional_kwargs.tool_calls?.map((toolCall) => {\n",
- " const toolCallResult5 = getCurrentWeather(\n",
- " JSON.parse(toolCall.function.arguments).location\n",
- " );\n",
- " return new ToolMessage({\n",
- " tool_call_id: toolCall.id,\n",
- " name: toolCall.function.name,\n",
- " content: toolCallResult5,\n",
- " });\n",
+ "const fineTunedLlm = new ChatOpenAI({\n",
+ " temperature: 0.9,\n",
+ " model: \"ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}\",\n",
"});\n",
"\n",
- "// Send the results back as the next step in the conversation\n",
- "const finalResponse5 = await llm5.invoke([\n",
- " [\"human\", \"What's the weather like in San Francisco, Tokyo, and Paris?\"],\n",
- " res5,\n",
- " ...(toolMessages5 ?? []),\n",
- "]);\n",
- "\n",
- "console.log(finalResponse5);"
+ "await fineTunedLlm.invoke(\"Hi there!\");"
]
},
{
"cell_type": "markdown",
- "id": "067715fe",
- "metadata": {},
- "source": [
- "### `.withStructuredOutput({ ... })`\n",
- "\n",
- "You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatOpenAI` into returning a structured output.\n",
- "\n",
- "The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)).\n",
- "\n",
- "Using the method is simple. Just define your LLM and call `.withStructuredOutput({ ... })` on it, passing the desired schema.\n",
- "\n",
- "Here is an example using a Zod schema and the `functionCalling` mode (default mode):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "94bab2ee",
+ "id": "a2270901",
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ operation: 'add', number1: 2, number2: 2 }\n"
- ]
- }
- ],
"source": [
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "import { z } from \"zod\";\n",
+ "## Generation metadata\n",
"\n",
- "const llm6 = new ChatOpenAI({\n",
- " temperature: 0,\n",
- " model: \"gpt-4-turbo-preview\",\n",
- "});\n",
+ "If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response within the `response_metadata` field on the message.\n",
"\n",
- "const calculatorSchema6 = z.object({\n",
- " operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n",
- " number1: z.number(),\n",
- " number2: z.number(),\n",
- "});\n",
+ "```{=mdx}\n",
"\n",
- "const modelWithStructuredOutput6 = llm6.withStructuredOutput(calculatorSchema6);\n",
+ ":::tip\n",
+ "Requires `@langchain/core` version >=0.1.48.\n",
+ ":::\n",
"\n",
- "const prompt6 = ChatPromptTemplate.fromMessages([\n",
- " [\"system\", \"You are VERY bad at math and must always use a calculator.\"],\n",
- " [\"human\", \"Please help me!! What is 2 + 2?\"],\n",
- "]);\n",
- "const chain6 = prompt6.pipe(modelWithStructuredOutput6);\n",
- "const result6 = await chain6.invoke({});\n",
- "console.log(result6);\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "b6e97547",
- "metadata": {},
- "source": [
- "You can also specify `includeRaw` to return the parsed and raw output in the result."
+ "```"
]
},
{
"cell_type": "code",
- "execution_count": 8,
- "id": "640acaf4",
+ "execution_count": 2,
+ "id": "2b675330",
"metadata": {},
"outputs": [
{
@@ -657,760 +332,96 @@
"output_type": "stream",
"text": [
"{\n",
- " raw: AIMessage {\n",
- " \"id\": \"chatcmpl-9rB5emIYRslBFrUIsC2368dXltljw\",\n",
- " \"content\": \"\",\n",
- " \"additional_kwargs\": {\n",
- " \"tool_calls\": [\n",
- " {\n",
- " \"id\": \"call_JaH5OB3KYvKF76TUOt6Lp8mu\",\n",
- " \"type\": \"function\",\n",
- " \"function\": \"[Object]\"\n",
- " }\n",
- " ]\n",
+ " content: [\n",
+ " {\n",
+ " token: 'Hello',\n",
+ " logprob: -0.0005151443,\n",
+ " bytes: [ 72, 101, 108, 108, 111 ],\n",
+ " top_logprobs: []\n",
+ " },\n",
+ " {\n",
+ " token: '!',\n",
+ " logprob: -0.00004334534,\n",
+ " bytes: [ 33 ],\n",
+ " top_logprobs: []\n",
+ " },\n",
+ " {\n",
+ " token: ' How',\n",
+ " logprob: -0.000035477897,\n",
+ " bytes: [ 32, 72, 111, 119 ],\n",
+ " top_logprobs: []\n",
+ " },\n",
+ " {\n",
+ " token: ' can',\n",
+ " logprob: -0.0006658526,\n",
+ " bytes: [ 32, 99, 97, 110 ],\n",
+ " top_logprobs: []\n",
+ " },\n",
+ " {\n",
+ " token: ' I',\n",
+ " logprob: -0.0000010280384,\n",
+ " bytes: [ 32, 73 ],\n",
+ " top_logprobs: []\n",
+ " },\n",
+ " {\n",
+ " token: ' assist',\n",
+ " logprob: -0.10124119,\n",
+ " bytes: [\n",
+ " 32, 97, 115,\n",
+ " 115, 105, 115,\n",
+ " 116\n",
+ " ],\n",
+ " top_logprobs: []\n",
+ " },\n",
+ " {\n",
+ " token: ' you',\n",
+ " logprob: -5.5122365e-7,\n",
+ " bytes: [ 32, 121, 111, 117 ],\n",
+ " top_logprobs: []\n",
" },\n",
- " \"response_metadata\": {\n",
- " \"tokenUsage\": {\n",
- " \"completionTokens\": 15,\n",
- " \"promptTokens\": 93,\n",
- " \"totalTokens\": 108\n",
- " },\n",
- " \"finish_reason\": \"stop\"\n",
+ " {\n",
+ " token: ' today',\n",
+ " logprob: -0.000052643223,\n",
+ " bytes: [ 32, 116, 111, 100, 97, 121 ],\n",
+ " top_logprobs: []\n",
" },\n",
- " \"tool_calls\": [\n",
- " {\n",
- " \"name\": \"calculator\",\n",
- " \"args\": {\n",
- " \"number1\": 2,\n",
- " \"number2\": 2,\n",
- " \"operation\": \"add\"\n",
- " },\n",
- " \"type\": \"tool_call\",\n",
- " \"id\": \"call_JaH5OB3KYvKF76TUOt6Lp8mu\"\n",
- " }\n",
- " ],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 93,\n",
- " \"output_tokens\": 15,\n",
- " \"total_tokens\": 108\n",
+ " {\n",
+ " token: '?',\n",
+ " logprob: -0.000012352386,\n",
+ " bytes: [ 63 ],\n",
+ " top_logprobs: []\n",
" }\n",
- " },\n",
- " parsed: { operation: 'add', number1: 2, number2: 2 }\n",
+ " ]\n",
"}\n"
]
}
],
"source": [
- "const includeRawModel6 = llm6.withStructuredOutput(calculatorSchema6, {\n",
- " name: \"calculator\",\n",
- " includeRaw: true,\n",
+ "import { ChatOpenAI } from \"@langchain/openai\";\n",
+ "\n",
+ "// See https://cookbook.openai.com/examples/using_logprobs for details\n",
+ "const llmWithLogprobs = new ChatOpenAI({\n",
+ " logprobs: true,\n",
+ " // topLogprobs: 5,\n",
"});\n",
"\n",
- "const includeRawChain6 = prompt6.pipe(includeRawModel6);\n",
- "const includeRawResult6 = await includeRawChain6.invoke({});\n",
- "console.log(includeRawResult6);"
+ "const responseMessageWithLogprobs = await llmWithLogprobs.invoke(\"Hi there!\");\n",
+ "console.dir(responseMessageWithLogprobs.response_metadata.logprobs, { depth: null });"
]
},
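+ {
+ "cell_type": "markdown",
+ "id": "8c1f2d3e",
+ "metadata": {},
+ "source": [
+ "Token usage is surfaced on the returned message as well. Here is a minimal sketch of reading it (actual counts will vary by model and prompt):\n",
+ "\n",
+ "```typescript\n",
+ "import { ChatOpenAI } from \"@langchain/openai\";\n",
+ "\n",
+ "const llmForUsage = new ChatOpenAI({ model: \"gpt-4o\" });\n",
+ "\n",
+ "const usageMessage = await llmForUsage.invoke(\"Hi there!\");\n",
+ "\n",
+ "// Standardized counts on the message itself\n",
+ "console.log(usageMessage.usage_metadata);\n",
+ "// -> { input_tokens: ..., output_tokens: ..., total_tokens: ... }\n",
+ "\n",
+ "// Raw OpenAI-style counts within `response_metadata`\n",
+ "console.log(usageMessage.response_metadata.tokenUsage);\n",
+ "// -> { completionTokens: ..., promptTokens: ..., totalTokens: ... }\n",
+ "```"
+ ]
+ },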
{
"cell_type": "markdown",
- "id": "f92f236c",
+ "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
- "Additionally, you can pass in an OpenAI function definition or JSON schema directly:\n",
+ "## Tool calling\n",
"\n",
- "```{=mdx}\n",
+ "Tool calling with OpenAI models works in a similar to [other models](/docs/how_to/tool_calling). Additionally, the following guides have some information especially relevant to OpenAI:\n",
"\n",
- ":::info\n",
- "If using `jsonMode` as the `method` you must include context in your prompt about the structured output you want. This _must_ include the keyword: `JSON`.\n",
- ":::\n",
+ "- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel/)\n",
+ "- [How to: force a tool call](/docs/how_to/tool_choice/)\n",
+ "- [How to: bind model-specific tool formats to a model](/docs/how_to/tool_calling#binding-model-specific-formats-advanced).\n",
"\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "02e01d32",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ number1: 2, number2: 2, operation: 'add' }\n"
- ]
- }
- ],
- "source": [
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "\n",
- "const llm7 = new ChatOpenAI({\n",
- " temperature: 0,\n",
- " model: \"gpt-4-turbo-preview\",\n",
- "});\n",
- "\n",
- "const calculatorSchema7 = {\n",
- " type: \"object\",\n",
- " properties: {\n",
- " operation: {\n",
- " type: \"string\",\n",
- " enum: [\"add\", \"subtract\", \"multiply\", \"divide\"],\n",
- " },\n",
- " number1: { type: \"number\" },\n",
- " number2: { type: \"number\" },\n",
- " },\n",
- " required: [\"operation\", \"number1\", \"number2\"],\n",
- "};\n",
- "\n",
- "// Default mode is \"functionCalling\"\n",
- "const modelWithStructuredOutput7 = llm7.withStructuredOutput(calculatorSchema7);\n",
- "\n",
- "const prompt7 = ChatPromptTemplate.fromMessages([\n",
- " [\n",
- " \"system\",\n",
- " `You are VERY bad at math and must always use a calculator.\n",
- "Respond with a JSON object containing three keys:\n",
- "'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',\n",
- "'number1': the first number to operate on,\n",
- "'number2': the second number to operate on.\n",
- "`,\n",
- " ],\n",
- " [\"human\", \"Please help me!! What is 2 + 2?\"],\n",
- "]);\n",
- "const chain7 = prompt7.pipe(modelWithStructuredOutput7);\n",
- "const result7 = await chain7.invoke({});\n",
- "console.log(result7);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "ae798f49",
- "metadata": {},
- "source": [
- "You can also specify 'includeRaw' to return the parsed and raw output in the result, as well as a \"name\" field to give the LLM additional context as to what you are generating."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "9a5579e4",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " raw: AIMessage {\n",
- " \"id\": \"chatcmpl-9rB5lkylQMLSP9CQ4SaQB9zGw1rP1\",\n",
- " \"content\": \"{\\n \\\"operation\\\": \\\"add\\\",\\n \\\"number1\\\": 2,\\n \\\"number2\\\": 2\\n}\",\n",
- " \"additional_kwargs\": {},\n",
- " \"response_metadata\": {\n",
- " \"tokenUsage\": {\n",
- " \"completionTokens\": 25,\n",
- " \"promptTokens\": 91,\n",
- " \"totalTokens\": 116\n",
- " },\n",
- " \"finish_reason\": \"stop\"\n",
- " },\n",
- " \"tool_calls\": [],\n",
- " \"invalid_tool_calls\": [],\n",
- " \"usage_metadata\": {\n",
- " \"input_tokens\": 91,\n",
- " \"output_tokens\": 25,\n",
- " \"total_tokens\": 116\n",
- " }\n",
- " },\n",
- " parsed: { operation: 'add', number1: 2, number2: 2 }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "const includeRawModel7 = llm7.withStructuredOutput(calculatorSchema7, {\n",
- " name: \"calculator\",\n",
- " includeRaw: true,\n",
- " method: \"jsonMode\",\n",
- "});\n",
- "\n",
- "const includeRawChain7 = prompt7.pipe(includeRawModel7);\n",
- "const includeRawResult7 = await includeRawChain7.invoke({});\n",
- "console.log(includeRawResult7);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "bf343e65",
- "metadata": {},
- "source": [
- "### Disabling parallel tool calls\n",
- "\n",
- "If you have multiple tools bound to the model, but you'd only like for a single tool to be called at a time, you can pass the `parallel_tool_calls` call option to enable/disable this behavior.\n",
- "By default, `parallel_tool_calls` is set to `true`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "5cb759f2",
- "metadata": {},
- "outputs": [],
- "source": [
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "import { z } from \"zod\";\n",
- "import { zodToJsonSchema } from \"zod-to-json-schema\";\n",
- "\n",
- "const llm8 = new ChatOpenAI({\n",
- " temperature: 0,\n",
- " model: \"gpt-4o\",\n",
- "});\n",
- "\n",
- "// Define your tools\n",
- "const calculatorSchema8 = z\n",
- " .object({\n",
- " operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n",
- " number1: z.number(),\n",
- " number2: z.number(),\n",
- " })\n",
- " .describe(\"A tool to perform basic arithmetic operations\");\n",
- "const weatherSchema8 = z\n",
- " .object({\n",
- " city: z.string(),\n",
- " })\n",
- " .describe(\"A tool to get the weather in a city\");\n",
- "\n",
- "// Bind tools to the model\n",
- "const modelWithTools8 = llm8.bindTools([\n",
- " {\n",
- " type: \"function\",\n",
- " function: {\n",
- " name: \"calculator\",\n",
- " description: calculatorSchema8.description,\n",
- " parameters: zodToJsonSchema(calculatorSchema8),\n",
- " },\n",
- " },\n",
- " {\n",
- " type: \"function\",\n",
- " function: {\n",
- " name: \"weather\",\n",
- " description: weatherSchema8.description,\n",
- " parameters: zodToJsonSchema(weatherSchema8),\n",
- " },\n",
- " },\n",
- "]);\n",
- "\n",
- "// Invoke the model with `parallel_tool_calls` set to `true`\n",
- "const response8 = await modelWithTools8.invoke(\n",
- " [\"What is the weather in san francisco and what is 23716 times 27342?\"],\n",
- " {\n",
- " parallel_tool_calls: true,\n",
- " }\n",
- ");"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "42a69645",
- "metadata": {},
- "source": [
- "We can see it called two tools:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "id": "95db614b",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'weather',\n",
- " args: { city: 'san francisco' },\n",
- " type: 'tool_call',\n",
- " id: 'call_FyxazII0M0OgKMnk2UuXDhjv'\n",
- " },\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { operation: 'multiply', number1: 23716, number2: 27342 },\n",
- " type: 'tool_call',\n",
- " id: 'call_raQz2ABUtVpbkruA2K6vBNYd'\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
- "console.log(response8.tool_calls);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "6a46a7bb",
- "metadata": {},
- "source": [
- "Invoke the model with `parallel_tool_calls` set to `false`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "id": "6a2bac84",
- "metadata": {},
- "outputs": [],
- "source": [
- "const response9 = await modelWithTools8.invoke(\n",
- " [\"What is the weather in san francisco and what is 23716 times 27342?\"],\n",
- " {\n",
- " parallel_tool_calls: false,\n",
- " }\n",
- ");"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "17d91e9f",
- "metadata": {},
- "source": [
- "We can see it called one tool"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "id": "5731d51d",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'weather',\n",
- " args: { city: 'san francisco' },\n",
- " type: 'tool_call',\n",
- " id: 'call_xFbViRUVYj8BFnJIVedU7GVn'\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
- "console.log(response9.tool_calls);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "06ffc86b",
- "metadata": {},
- "source": [
- "## Custom URLs\n",
- "\n",
- "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "19a092b9",
- "metadata": {},
- "outputs": [],
- "source": [
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "\n",
- "const llm10 = new ChatOpenAI({\n",
- " temperature: 0.9,\n",
- " configuration: {\n",
- " baseURL: \"https://your_custom_url.com\",\n",
- " },\n",
- "});\n",
- "\n",
- "const message10 = await llm10.invoke(\"Hi there!\");"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "013b6300",
- "metadata": {},
- "source": [
- "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n",
- "\n",
- "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/chat/azure).\n",
- "\n",
- "## Calling fine-tuned models\n",
- "\n",
- "You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter.\n",
- "\n",
- "This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7448f6a9",
- "metadata": {},
- "outputs": [],
- "source": [
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "\n",
- "const llm11 = new ChatOpenAI({\n",
- " temperature: 0.9,\n",
- " model: \"ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}\",\n",
- "});\n",
- "\n",
- "const message11 = await llm11.invoke(\"Hi there!\");"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "a2270901",
- "metadata": {},
- "source": [
- "## Generation metadata\n",
- "\n",
- "If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response.\n",
- "\n",
- "```{=mdx}\n",
- "\n",
- ":::tip\n",
- "Requires `@langchain/core` version >=0.1.48.\n",
- ":::\n",
- "\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "id": "2b675330",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " content: [\n",
- " {\n",
- " token: 'Hello',\n",
- " logprob: -0.0004585519,\n",
- " bytes: [ 72, 101, 108, 108, 111 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: '!',\n",
- " logprob: -0.000049305523,\n",
- " bytes: [ 33 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' How',\n",
- " logprob: -0.000029517714,\n",
- " bytes: [ 32, 72, 111, 119 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' can',\n",
- " logprob: -0.00073185476,\n",
- " bytes: [ 32, 99, 97, 110 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' I',\n",
- " logprob: -9.0883464e-7,\n",
- " bytes: [ 32, 73 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' assist',\n",
- " logprob: -0.104538105,\n",
- " bytes: [\n",
- " 32, 97, 115,\n",
- " 115, 105, 115,\n",
- " 116\n",
- " ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' you',\n",
- " logprob: -6.704273e-7,\n",
- " bytes: [ 32, 121, 111, 117 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' today',\n",
- " logprob: -0.000052643223,\n",
- " bytes: [ 32, 116, 111, 100, 97, 121 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: '?',\n",
- " logprob: -0.00001247159,\n",
- " bytes: [ 63 ],\n",
- " top_logprobs: []\n",
- " }\n",
- " ]\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "\n",
- "// See https://cookbook.openai.com/examples/using_logprobs for details\n",
- "const llm12 = new ChatOpenAI({\n",
- " logprobs: true,\n",
- " // topLogprobs: 5,\n",
- "});\n",
- "\n",
- "const responseMessage12 = await llm12.invoke(\"Hi there!\");\n",
- "console.dir(responseMessage12.response_metadata.logprobs, { depth: null });"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "899c364f",
- "metadata": {},
- "source": [
- "### With callbacks\n",
- "\n",
- "You can also use the callbacks system:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "id": "01e74121",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " content: [\n",
- " {\n",
- " token: 'Hello',\n",
- " logprob: -0.0005182436,\n",
- " bytes: [ 72, 101, 108, 108, 111 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: '!',\n",
- " logprob: -0.000040246043,\n",
- " bytes: [ 33 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' How',\n",
- " logprob: -0.000035716304,\n",
- " bytes: [ 32, 72, 111, 119 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' can',\n",
- " logprob: -0.0006764544,\n",
- " bytes: [ 32, 99, 97, 110 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' I',\n",
- " logprob: -0.0000010280384,\n",
- " bytes: [ 32, 73 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' assist',\n",
- " logprob: -0.12827769,\n",
- " bytes: [\n",
- " 32, 97, 115,\n",
- " 115, 105, 115,\n",
- " 116\n",
- " ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' you',\n",
- " logprob: -4.3202e-7,\n",
- " bytes: [ 32, 121, 111, 117 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' today',\n",
- " logprob: -0.000059914648,\n",
- " bytes: [ 32, 116, 111, 100, 97, 121 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: '?',\n",
- " logprob: -0.000012352386,\n",
- " bytes: [ 63 ],\n",
- " top_logprobs: []\n",
- " }\n",
- " ]\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "\n",
- "// See https://cookbook.openai.com/examples/using_logprobs for details\n",
- "const llm13 = new ChatOpenAI({\n",
- " logprobs: true,\n",
- " // topLogprobs: 5,\n",
- "});\n",
- "\n",
- "const result13 = await llm13.invoke(\"Hi there!\", {\n",
- " callbacks: [\n",
- " {\n",
- " handleLLMEnd(output) {\n",
- " console.dir(output.generations[0][0].generationInfo.logprobs, { depth: null });\n",
- " },\n",
- " },\n",
- " ],\n",
- "});"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 17,
- "id": "7f9f01aa",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " content: [\n",
- " {\n",
- " token: 'Hello',\n",
- " logprob: -0.0005182436,\n",
- " bytes: [ 72, 101, 108, 108, 111 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: '!',\n",
- " logprob: -0.000040246043,\n",
- " bytes: [ 33 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' How',\n",
- " logprob: -0.000035716304,\n",
- " bytes: [ 32, 72, 111, 119 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' can',\n",
- " logprob: -0.0006764544,\n",
- " bytes: [ 32, 99, 97, 110 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' I',\n",
- " logprob: -0.0000010280384,\n",
- " bytes: [ 32, 73 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' assist',\n",
- " logprob: -0.12827769,\n",
- " bytes: [\n",
- " 32, 97, 115,\n",
- " 115, 105, 115,\n",
- " 116\n",
- " ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' you',\n",
- " logprob: -4.3202e-7,\n",
- " bytes: [ 32, 121, 111, 117 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: ' today',\n",
- " logprob: -0.000059914648,\n",
- " bytes: [ 32, 116, 111, 100, 97, 121 ],\n",
- " top_logprobs: []\n",
- " },\n",
- " {\n",
- " token: '?',\n",
- " logprob: -0.000012352386,\n",
- " bytes: [ 63 ],\n",
- " top_logprobs: []\n",
- " }\n",
- " ]\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "console.dir(result13.response_metadata.logprobs, { depth: null });"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "5194627d",
- "metadata": {},
- "source": [
- "## Streaming tokens\n",
- "\n",
- "OpenAI supports streaming token counts via an opt-in call option. This can be set by passing `{ stream_options: { include_usage: true } }`.\n",
- "Setting this call option will cause the model to return an additional chunk at the end of the stream, containing the token usage."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 18,
- "id": "f6efaebb",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{ input_tokens: 13, output_tokens: 33, total_tokens: 46 }\n"
- ]
- }
- ],
- "source": [
- "import type { AIMessageChunk } from \"@langchain/core/messages\";\n",
- "import { ChatOpenAI } from \"@langchain/openai\";\n",
- "import { concat } from \"@langchain/core/utils/stream\";\n",
- "\n",
- "// Instantiate the model\n",
- "const llm14 = new ChatOpenAI();\n",
- "\n",
- "const response14 = await llm14.stream(\"Hello, how are you?\", {\n",
- " // Pass the stream options\n",
- " stream_options: {\n",
- " include_usage: true,\n",
- " },\n",
- "});\n",
- "\n",
- "// Iterate over the response, only saving the last chunk\n",
- "let finalResult14: AIMessageChunk | undefined;\n",
- "for await (const chunk14 of response14) {\n",
- " finalResult14 = !finalResult14 ? chunk14 : concat(finalResult14, chunk14);\n",
- "}\n",
- "\n",
- "console.log(finalResult14?.usage_metadata);"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
- "metadata": {},
- "source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html"
diff --git a/docs/core_docs/docs/integrations/chat/premai.mdx b/docs/core_docs/docs/integrations/chat/premai.mdx
index eb7324f48b2f..ceb9f963604c 100644
--- a/docs/core_docs/docs/integrations/chat/premai.mdx
+++ b/docs/core_docs/docs/integrations/chat/premai.mdx
@@ -28,3 +28,8 @@ npm install @langchain/community
import PremAI from "@examples/models/chat/integration_premai.ts";
{PremAI}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx b/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx
index bbbbbd5c0076..21a92fda13b5 100644
--- a/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx
+++ b/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx
@@ -51,3 +51,8 @@ console.log(JSON.stringify(respA, null, 3));
}
*/
```
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx b/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx
index 339cd7af9bd5..736a07833aa8 100644
--- a/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx
+++ b/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx
@@ -39,3 +39,8 @@ Here's an example:
import TencentHunyuan from "@examples/models/chat/integration_tencent_hunyuan.ts";
{TencentHunyuan}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/togetherai.ipynb b/docs/core_docs/docs/integrations/chat/togetherai.ipynb
index 8ed09f8d41c6..9dd6efa2ff81 100644
--- a/docs/core_docs/docs/integrations/chat/togetherai.ipynb
+++ b/docs/core_docs/docs/integrations/chat/togetherai.ipynb
@@ -21,7 +21,9 @@
"source": [
"# ChatTogetherAI\n",
"\n",
- "This will help you getting started with `ChatTogetherAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatTogetherAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html).\n",
+ "[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n",
+ "\n",
+ "This guide will help you getting started with `ChatTogetherAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatTogetherAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
@@ -31,6 +33,9 @@
"| [ChatTogetherAI](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_togetherai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | \n",
@@ -93,9 +98,6 @@
"const llm = new ChatTogetherAI({\n",
" model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n",
" temperature: 0,\n",
- " maxTokens: undefined,\n",
- " timeout: undefined,\n",
- " maxRetries: 2,\n",
" // other params...\n",
"})"
]
@@ -238,73 +240,6 @@
")"
]
},
- {
- "cell_type": "markdown",
- "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
- "metadata": {},
- "source": [
- "## Tool calling & JSON mode\n",
- "\n",
- "The TogetherAI chat supports JSON mode and calling tools.\n",
- "\n",
- "### Tool calling"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "8de584a8",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'calculator',\n",
- " args: { input: '2 + 3' },\n",
- " type: 'tool_call',\n",
- " id: 'call_nhtnmganqJPAG9I1cN8ULI9R'\n",
- " }\n",
- "]\n"
- ]
- }
- ],
- "source": [
- "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\";\n",
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
- "import { convertToOpenAITool } from \"@langchain/core/utils/function_calling\";\n",
- "import { Calculator } from \"@langchain/community/tools/calculator\";\n",
- "\n",
- "// Use a pre-built tool\n",
- "const calculatorTool = convertToOpenAITool(new Calculator());\n",
- "\n",
- "const modelWithCalculator = new ChatTogetherAI({\n",
- " temperature: 0,\n",
- " // This is the default env variable name it will look for if none is passed.\n",
- " apiKey: process.env.TOGETHER_AI_API_KEY,\n",
- " // Together JSON mode/tool calling only supports a select number of models\n",
- " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n",
- "}).bind({\n",
- " // Bind the tool to the model.\n",
- " tools: [calculatorTool],\n",
- " tool_choice: calculatorTool, // Specify what tool the model should use\n",
- "});\n",
- "\n",
- "const promptForTools = ChatPromptTemplate.fromMessages([\n",
- " [\"system\", \"You are a super not-so-smart mathmatician.\"],\n",
- " [\"human\", \"Help me out, how can I add {math}?\"],\n",
- "]);\n",
- "\n",
- "// Use LCEL to chain the prompt to the model.\n",
- "const responseWithTool = await promptForTools.pipe(modelWithCalculator).invoke({\n",
- " math: \"2 plus 3\",\n",
- "});\n",
- "\n",
- "console.dir(responseWithTool.tool_calls, { depth: null });"
- ]
- },
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
@@ -312,8 +247,6 @@
"source": [
"Behind the scenes, TogetherAI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n",
"\n",
- "- Certain properties are not supported by the TogetherAI API, see [here](https://docs.together.ai/reference/chat-completions).\n",
- "\n",
"## API reference\n",
"\n",
"For detailed documentation of all ChatTogetherAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html"
diff --git a/docs/core_docs/docs/integrations/chat/web_llm.mdx b/docs/core_docs/docs/integrations/chat/web_llm.mdx
index 1745222bf470..2b8add074b32 100644
--- a/docs/core_docs/docs/integrations/chat/web_llm.mdx
+++ b/docs/core_docs/docs/integrations/chat/web_llm.mdx
@@ -40,3 +40,8 @@ Streaming is also supported.
## Example
For a full end-to-end example, check out [this project](https://github.com/jacoblee93/fully-local-pdf-chatbot).
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/yandex.mdx b/docs/core_docs/docs/integrations/chat/yandex.mdx
index b54fc8920ccc..d600c6658360 100644
--- a/docs/core_docs/docs/integrations/chat/yandex.mdx
+++ b/docs/core_docs/docs/integrations/chat/yandex.mdx
@@ -31,3 +31,8 @@ import CodeBlock from "@theme/CodeBlock";
import YandexGPTChatExample from "@examples/models/chat/integration_yandex.ts";
{YandexGPTChatExample}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/chat/zhipuai.mdx b/docs/core_docs/docs/integrations/chat/zhipuai.mdx
index fd4035ad0b9e..97d154fdcf00 100644
--- a/docs/core_docs/docs/integrations/chat/zhipuai.mdx
+++ b/docs/core_docs/docs/integrations/chat/zhipuai.mdx
@@ -33,3 +33,8 @@ Here's an example:
import ZhipuAI from "@examples/models/chat/integration_zhipuai.ts";
{ZhipuAI}
+
+## Related
+
+- Chat model [conceptual guide](/docs/concepts/#chat-models)
+- Chat model [how-to guides](/docs/how_to/#chat-models)
diff --git a/docs/core_docs/docs/integrations/llms/ai21.mdx b/docs/core_docs/docs/integrations/llms/ai21.mdx
index 16abd7794527..fbf392faaccc 100644
--- a/docs/core_docs/docs/integrations/llms/ai21.mdx
+++ b/docs/core_docs/docs/integrations/llms/ai21.mdx
@@ -16,3 +16,8 @@ import CodeBlock from "@theme/CodeBlock";
import AI21Example from "@examples/models/llm/ai21.ts";
{AI21Example}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx b/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx
index f88815bae672..dc744330c1d7 100644
--- a/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx
+++ b/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx
@@ -16,3 +16,8 @@ import CodeBlock from "@theme/CodeBlock";
import AlephAlphaExample from "@examples/models/llm/aleph_alpha.ts";
{AlephAlphaExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx b/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx
index 3221c2fe2292..1f65b5feb246 100644
--- a/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx
+++ b/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx
@@ -24,3 +24,8 @@ import CodeBlock from "@theme/CodeBlock";
import SageMakerEndpointExample from "@examples/models/llm/sagemaker_endpoint.ts";
{SageMakerEndpointExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/chrome_ai.mdx b/docs/core_docs/docs/integrations/llms/chrome_ai.mdx
index 1a70fba38251..9f52e87c4f68 100644
--- a/docs/core_docs/docs/integrations/llms/chrome_ai.mdx
+++ b/docs/core_docs/docs/integrations/llms/chrome_ai.mdx
@@ -113,3 +113,8 @@ for await (const chunk of await model.stream("How are you?")) {
?
*/
```
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/cloudflare_workersai.mdx b/docs/core_docs/docs/integrations/llms/cloudflare_workersai.mdx
index abbfc7377006..62da205f795b 100644
--- a/docs/core_docs/docs/integrations/llms/cloudflare_workersai.mdx
+++ b/docs/core_docs/docs/integrations/llms/cloudflare_workersai.mdx
@@ -22,3 +22,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/models/llm/cloudflare_workersai.ts";
{Example}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/deep_infra.mdx b/docs/core_docs/docs/integrations/llms/deep_infra.mdx
index 76e75db0e134..3e6aab447937 100644
--- a/docs/core_docs/docs/integrations/llms/deep_infra.mdx
+++ b/docs/core_docs/docs/integrations/llms/deep_infra.mdx
@@ -23,3 +23,8 @@ You'll need to obtain an API key and set it as an environment variable named `DE
import Example from "@examples/models/llm/deepinfra.ts";
{Example}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/friendli.mdx b/docs/core_docs/docs/integrations/llms/friendli.mdx
index ffe0fd4c5afc..0582e39a6a26 100644
--- a/docs/core_docs/docs/integrations/llms/friendli.mdx
+++ b/docs/core_docs/docs/integrations/llms/friendli.mdx
@@ -27,3 +27,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/models/llm/friendli.ts";
{Example}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/google_palm.mdx b/docs/core_docs/docs/integrations/llms/google_palm.mdx
index e1b25cb6dd4b..28948cad384e 100644
--- a/docs/core_docs/docs/integrations/llms/google_palm.mdx
+++ b/docs/core_docs/docs/integrations/llms/google_palm.mdx
@@ -130,3 +130,8 @@ Streaming in multiple chunks is supported for faster responses:
import GoogleVertexAIStreaming from "@examples/llms/googlevertexai-streaming_legacy.ts";
{GoogleVertexAIStreaming}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/gradient_ai.mdx b/docs/core_docs/docs/integrations/llms/gradient_ai.mdx
index 89bad687e0cb..a3feb9007a96 100644
--- a/docs/core_docs/docs/integrations/llms/gradient_ai.mdx
+++ b/docs/core_docs/docs/integrations/llms/gradient_ai.mdx
@@ -52,3 +52,8 @@ import GradientLLMAdapterExample from "@examples/llms/gradient_ai-adapter.ts";
-The use your own custom adapter simply set `adapterId` during setup.
+To use your own custom adapter, simply set `adapterId` during setup.
{GradientLLMAdapterExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx b/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx
index e9e917f739f0..1c9e322ee117 100644
--- a/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx
+++ b/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx
@@ -20,3 +20,8 @@ const model = new HuggingFaceInference({
const res = await model.invoke("1 + 1 =");
console.log({ res });
```
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/layerup_security.mdx b/docs/core_docs/docs/integrations/llms/layerup_security.mdx
index 983509419177..10a5172d0700 100644
--- a/docs/core_docs/docs/integrations/llms/layerup_security.mdx
+++ b/docs/core_docs/docs/integrations/llms/layerup_security.mdx
@@ -29,3 +29,8 @@ And now you're ready to start protecting your LLM calls with Layerup Security!
import LayerupSecurityExampleCode from "@examples/llms/layerup_security.ts";
{LayerupSecurityExampleCode}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/llama_cpp.mdx b/docs/core_docs/docs/integrations/llms/llama_cpp.mdx
index 66f35aeb529b..cc36f3284d83 100644
--- a/docs/core_docs/docs/integrations/llms/llama_cpp.mdx
+++ b/docs/core_docs/docs/integrations/llms/llama_cpp.mdx
@@ -119,3 +119,8 @@ import LlamaCppExample from "@examples/models/llm/llama_cpp.ts";
import LlamaCppStreamExample from "@examples/models/llm/llama_cpp_stream.ts";
{LlamaCppStreamExample};
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx b/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx
index 84dea5a208c9..44d675a77de0 100644
--- a/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx
+++ b/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx
@@ -27,3 +27,8 @@ console.log({ res });
}
*/
```
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/ollama.mdx b/docs/core_docs/docs/integrations/llms/ollama.mdx
index ae8859615877..8077d63ce978 100644
--- a/docs/core_docs/docs/integrations/llms/ollama.mdx
+++ b/docs/core_docs/docs/integrations/llms/ollama.mdx
@@ -40,3 +40,8 @@ You can bind base64 encoded image data to multimodal-capable models to use as co
import OllamaMultimodalExample from "@examples/models/llm/ollama_multimodal.ts";
{OllamaMultimodalExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx b/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx
index 9616e67fba77..29a7a9d5cf8a 100644
--- a/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx
+++ b/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx
@@ -54,3 +54,8 @@ const res = await model.invoke(
The request and the response will be logged in the [PromptLayer dashboard](https://promptlayer.com/home).
> **_Note:_** In streaming mode PromptLayer will not log the response.
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/raycast.mdx b/docs/core_docs/docs/integrations/llms/raycast.mdx
index 10784823c1dd..77f9b017249d 100644
--- a/docs/core_docs/docs/integrations/llms/raycast.mdx
+++ b/docs/core_docs/docs/integrations/llms/raycast.mdx
@@ -20,3 +20,8 @@ import CodeBlock from "@theme/CodeBlock";
import RaycastAIExample from "@examples/models/llm/raycast.ts";
{RaycastAIExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/replicate.mdx b/docs/core_docs/docs/integrations/llms/replicate.mdx
index 536f19109f07..bdf7f4e72ed1 100644
--- a/docs/core_docs/docs/integrations/llms/replicate.mdx
+++ b/docs/core_docs/docs/integrations/llms/replicate.mdx
@@ -19,3 +19,8 @@ import ReplicateLlama2 from "@examples/models/llm/replicate_llama2.ts";
You can run other models through Replicate by changing the `model` parameter.
You can find a full list of models on [Replicate's website](https://replicate.com/explore).
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/togetherai.mdx b/docs/core_docs/docs/integrations/llms/togetherai.mdx
index e23a496bdf8c..05502459cabe 100644
--- a/docs/core_docs/docs/integrations/llms/togetherai.mdx
+++ b/docs/core_docs/docs/integrations/llms/togetherai.mdx
@@ -32,3 +32,8 @@ Together AI also supports streaming, this example demonstrates how to use this f
:::info
You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/26b5716e-6f00-47c1-aa71-1838a1eddbd1/r)
:::
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx b/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx
index d832565c78ad..67ca727528b0 100644
--- a/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx
+++ b/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx
@@ -33,3 +33,8 @@ import CodeBlock from "@theme/CodeBlock";
import WatsonxAiExample from "@examples/llms/watsonx_ai.ts";
{WatsonxAiExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/writer.mdx b/docs/core_docs/docs/integrations/llms/writer.mdx
index 3f0c11e2cd76..4371dc66aa4c 100644
--- a/docs/core_docs/docs/integrations/llms/writer.mdx
+++ b/docs/core_docs/docs/integrations/llms/writer.mdx
@@ -26,3 +26,8 @@ import CodeBlock from "@theme/CodeBlock";
import WriterExample from "@examples/models/llm/writer.ts";
{WriterExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/llms/yandex.mdx b/docs/core_docs/docs/integrations/llms/yandex.mdx
index ed0ca54f9284..55f7009f9084 100644
--- a/docs/core_docs/docs/integrations/llms/yandex.mdx
+++ b/docs/core_docs/docs/integrations/llms/yandex.mdx
@@ -27,3 +27,8 @@ import CodeBlock from "@theme/CodeBlock";
import YandexGPTExample from "@examples/models/llm/yandex.ts";
{YandexGPTExample}
+
+## Related
+
+- LLM [conceptual guide](/docs/concepts/#llms)
+- LLM [how-to guides](/docs/how_to/#llms)
diff --git a/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx
index 4610f5b5d148..dc0acbbe613a 100644
--- a/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx
@@ -16,3 +16,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/chaindesk.ts";
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx b/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx
index 3f5206a1f1ff..42a535462bdd 100644
--- a/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx
@@ -28,3 +28,8 @@ const docs = await retriever.invoke("hello world");
console.log(docs);
```
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/dria.mdx b/docs/core_docs/docs/integrations/retrievers/dria.mdx
index 761c4df44b97..a4f325dfb3f2 100644
--- a/docs/core_docs/docs/integrations/retrievers/dria.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/dria.mdx
@@ -37,3 +37,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/dria.ts";
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/hyde.mdx b/docs/core_docs/docs/integrations/retrievers/hyde.mdx
index f704c37749c4..20b7395b92af 100644
--- a/docs/core_docs/docs/integrations/retrievers/hyde.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/hyde.mdx
@@ -24,3 +24,8 @@ npm install @langchain/openai
```
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx
index bdec229eee9c..19f6ce018075 100644
--- a/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx
@@ -22,3 +22,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/metal.ts";
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx b/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx
index 2c0cf7a9e5a3..7e3a67deb296 100644
--- a/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx
@@ -85,3 +85,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/supabase_hybrid.ts";
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx
index 4991b801f30f..31486403228c 100644
--- a/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx
@@ -29,3 +29,8 @@ npm install @langchain/openai
```
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx
index 55a2a1b6a063..b3a75fa44cf9 100644
--- a/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx
@@ -25,3 +25,8 @@ If this is your case, you can, for instance set up a [CloudFlare Worker](https:/
that contains the necessary credentials to connect to the instance.
Now you can return the results and continue using them in LangChain.
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx
index 13c48f5dc68d..28af48635893 100644
--- a/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx
@@ -34,3 +34,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/zep_cloud.ts";
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx
index f4326de54572..58d593e546ba 100644
--- a/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx
+++ b/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx
@@ -32,3 +32,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/zep.ts";
{Example}
+
+## Related
+
+- Retriever [conceptual guide](/docs/concepts/#retrievers)
+- Retriever [how-to guides](/docs/how_to/#retrievers)
diff --git a/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx b/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx
index 8e71e00716d7..c054c80c5000 100644
--- a/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx
+++ b/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx
@@ -58,3 +58,7 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/stores/cassandra_storage.ts";
{Example}
+
+## Related
+
+- [Key-value store conceptual guide](/docs/concepts/#key-value-stores)
diff --git a/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx b/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx
index 92b15ecfb7e6..f62653356941 100644
--- a/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx
+++ b/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx
@@ -14,3 +14,7 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/stores/ioredis_storage.ts";
{Example}
+
+## Related
+
+- [Key-value store conceptual guide](/docs/concepts/#key-value-stores)
diff --git a/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx b/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx
index 3a74ee292295..8a94fbe3d625 100644
--- a/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx
+++ b/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx
@@ -14,3 +14,7 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/stores/upstash_redis_storage.ts";
{Example}
+
+## Related
+
+- [Key-value store conceptual guide](/docs/concepts/#key-value-stores)
diff --git a/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx b/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx
index e45013f73562..d4540df1ecf2 100644
--- a/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx
+++ b/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx
@@ -14,3 +14,7 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/stores/vercel_kv_storage.ts";
{Example}
+
+## Related
+
+- [Key-value store conceptual guide](/docs/concepts/#key-value-stores)
diff --git a/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx b/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx
index 5b16a9d5fbc5..7e8141406435 100644
--- a/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx
@@ -26,3 +26,8 @@ import CodeBlock from "@theme/CodeBlock";
import AlibabaTongyiExample from "@examples/embeddings/alibaba_tongyi.ts";
{AlibabaTongyiExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx b/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx
index 116e75d20239..80b2c174aa58 100644
--- a/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx
@@ -24,3 +24,8 @@ import CodeBlock from "@theme/CodeBlock";
import BaiduQianFanExample from "@examples/embeddings/baidu_qianfan.ts";
{BaiduQianFanExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/bedrock.mdx b/docs/core_docs/docs/integrations/text_embedding/bedrock.mdx
index cbaca2d89140..0ab08081d607 100644
--- a/docs/core_docs/docs/integrations/text_embedding/bedrock.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/bedrock.mdx
@@ -45,3 +45,8 @@ const embeddings = new BedrockEmbeddings({
client,
});
```
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.mdx b/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.mdx
index a643a5be02e6..a87116810f38 100644
--- a/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.mdx
@@ -45,3 +45,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/indexes/vector_stores/cloudflare_vectorize/example.ts";
{Example}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx b/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx
index 34b6b942bd7a..a05bf320dd25 100644
--- a/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx
@@ -125,3 +125,8 @@ runExample();
## Feedback and Support
For feedback or questions, please contact [feedback@deepinfra.com](mailto:feedback@deepinfra.com).
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/google_generativeai.mdx b/docs/core_docs/docs/integrations/text_embedding/google_generativeai.mdx
index 752849cfb16b..9e4fb1e97769 100644
--- a/docs/core_docs/docs/integrations/text_embedding/google_generativeai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/google_generativeai.mdx
@@ -26,3 +26,8 @@ npm install @langchain/google-genai
import GoogleGenerativeAIExample from "@examples/models/embeddings/googlegenerativeai.ts";
{GoogleGenerativeAIExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/google_palm.mdx b/docs/core_docs/docs/integrations/text_embedding/google_palm.mdx
index f1ececd44bce..36f1fc2d9eda 100644
--- a/docs/core_docs/docs/integrations/text_embedding/google_palm.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/google_palm.mdx
@@ -32,3 +32,8 @@ the model.
import GooglePaLMExample from "@examples/models/embeddings/googlepalm.ts";
{GooglePaLMExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.mdx b/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.mdx
index 48f370840567..54e57020113d 100644
--- a/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.mdx
@@ -34,3 +34,8 @@ import GoogleVertexAIExample from "@examples/models/embeddings/googlevertexai.ts
**Note:** The default Google Vertex AI embeddings model, `textembedding-gecko`, has a different number of dimensions than OpenAI's `text-embedding-ada-002` model
and may not be supported by all vector store providers.
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx b/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx
index a8f9cda8daa2..95eca64ecb65 100644
--- a/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx
@@ -37,3 +37,8 @@ import CodeBlock from "@theme/CodeBlock";
import GradientEmbeddingsExample from "@examples/embeddings/gradient_ai.ts";
{GradientEmbeddingsExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx b/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx
index 4e741e9ddf2f..a43457b6b73c 100644
--- a/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx
@@ -23,3 +23,8 @@ const embeddings = new HuggingFaceInferenceEmbeddings({
apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.HUGGINGFACEHUB_API_KEY
});
```
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/jina.mdx b/docs/core_docs/docs/integrations/text_embedding/jina.mdx
index a9dfe8c023e0..4eb6008822b2 100644
--- a/docs/core_docs/docs/integrations/text_embedding/jina.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/jina.mdx
@@ -108,3 +108,8 @@ runExample();
## Feedback and Support
For feedback or questions, please contact [support@jina.ai](mailto:support@jina.ai).
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx b/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx
index 671ee37de74e..e39b54b8ab17 100644
--- a/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx
@@ -50,3 +50,8 @@ import BasicExample from "@examples/embeddings/llama_cpp_basic.ts";
import DocsExample from "@examples/embeddings/llama_cpp_docs.ts";
{DocsExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/minimax.mdx b/docs/core_docs/docs/integrations/text_embedding/minimax.mdx
index bd82d3450336..f6e1a881fc20 100644
--- a/docs/core_docs/docs/integrations/text_embedding/minimax.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/minimax.mdx
@@ -24,3 +24,8 @@ export const run = async () => {
console.log({ documentRes });
};
```
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx b/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx
index 6e28a90dc1ae..824a616fdeea 100644
--- a/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx
@@ -83,3 +83,8 @@ try {
console.error(error);
}
```
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/nomic.mdx b/docs/core_docs/docs/integrations/text_embedding/nomic.mdx
index 9eadde113c66..232bd991a13f 100644
--- a/docs/core_docs/docs/integrations/text_embedding/nomic.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/nomic.mdx
@@ -27,3 +27,8 @@ import CodeBlock from "@theme/CodeBlock";
import NomicExample from "@examples/models/embeddings/nomic.ts";
{NomicExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/ollama.mdx b/docs/core_docs/docs/integrations/text_embedding/ollama.mdx
index 2bfc91103944..0b5ee1145955 100644
--- a/docs/core_docs/docs/integrations/text_embedding/ollama.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/ollama.mdx
@@ -64,3 +64,8 @@ const documentEmbeddings = await embeddings.embedDocuments(documents);
console.log(documentEmbeddings);
```
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/premai.mdx b/docs/core_docs/docs/integrations/text_embedding/premai.mdx
index 57bb2a3fbaea..71705ecb963a 100644
--- a/docs/core_docs/docs/integrations/text_embedding/premai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/premai.mdx
@@ -26,3 +26,8 @@ import CodeBlock from "@theme/CodeBlock";
import PremExample from "@examples/embeddings/premai.ts";
{PremExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx b/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx
index 0020db655ba8..1b8afe372338 100644
--- a/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx
@@ -36,3 +36,8 @@ import CodeBlock from "@theme/CodeBlock";
import TencentHunyuan from "@examples/models/embeddings/tencent_hunyuan.ts";
{TencentHunyuan}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx b/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx
index 6ab3fa5e903b..7192643f78a9 100644
--- a/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx
@@ -14,3 +14,8 @@ const embeddings = new TensorFlowEmbeddings();
```
This example uses the CPU backend, which works in any JS environment. However, you can use any of the backends supported by TensorFlow.js, including GPU and WebAssembly, which will be a lot faster. For Node.js you can use the `@tensorflow/tfjs-node` package, and for the browser you can use the `@tensorflow/tfjs-backend-webgl` package. See the [TensorFlow.js documentation](https://www.tensorflow.org/js/guide/platform_environment) for more information.
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/transformers.mdx b/docs/core_docs/docs/integrations/text_embedding/transformers.mdx
index a11c28c0107d..f0211b07d2c6 100644
--- a/docs/core_docs/docs/integrations/text_embedding/transformers.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/transformers.mdx
@@ -33,3 +33,8 @@ set up your project.
import HFTransformersExample from "@examples/models/embeddings/hf_transformers.ts";
{HFTransformersExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx b/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx
index 798302b535cc..3ebbb6cc621c 100644
--- a/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx
@@ -16,3 +16,8 @@ const embeddings = new VoyageEmbeddings({
  inputType: "document", // Optional: specify input type as 'query' or 'document', or omit to leave it undefined
});
```
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx b/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx
index a8a3cb3db78d..583c3e09cd25 100644
--- a/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx
+++ b/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx
@@ -28,3 +28,8 @@ import CodeBlock from "@theme/CodeBlock";
import ZhipuAIExample from "@examples/embeddings/zhipuai.ts";
{ZhipuAIExample}
+
+## Related
+
+- Embedding model [conceptual guide](/docs/concepts/#embedding-models)
+- Embedding model [how-to guides](/docs/how_to/#embedding-models)
diff --git a/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx b/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx
index 69df419c81c0..2f296b4e7e61 100644
--- a/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx
+++ b/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx
@@ -60,3 +60,8 @@ Finished chain.
}
}
````
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx b/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx
index 89aad3b76a91..1453265a1426 100644
--- a/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx
+++ b/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx
@@ -40,3 +40,8 @@ Here is a complete example where we use an Azure OpenAI chat model to call the P
import AgentExample from "@examples/tools/azure_dynamic_sessions/azure_dynamic_sessions-agent.ts";
{AgentExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/connery.mdx b/docs/core_docs/docs/integrations/tools/connery.mdx
index 658dccfd0836..a12470fe5255 100644
--- a/docs/core_docs/docs/integrations/tools/connery.mdx
+++ b/docs/core_docs/docs/integrations/tools/connery.mdx
@@ -60,3 +60,8 @@ You can see a LangSmith trace of this example [here](https://smith.langchain.com
:::note
Connery Action is a structured tool, so you can only use it with agents that support structured tools.
:::
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/dalle.mdx b/docs/core_docs/docs/integrations/tools/dalle.mdx
index 45fd604c5e4f..61b84450453b 100644
--- a/docs/core_docs/docs/integrations/tools/dalle.mdx
+++ b/docs/core_docs/docs/integrations/tools/dalle.mdx
@@ -25,3 +25,8 @@ npm install @langchain/openai
```
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/discord.mdx b/docs/core_docs/docs/integrations/tools/discord.mdx
index 9f8429a3597c..12fd25cfec61 100644
--- a/docs/core_docs/docs/integrations/tools/discord.mdx
+++ b/docs/core_docs/docs/integrations/tools/discord.mdx
@@ -36,3 +36,8 @@ npm install @langchain/openai
import AgentExample from "@examples/agents/discord.ts";
{AgentExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/exa_search.mdx b/docs/core_docs/docs/integrations/tools/exa_search.mdx
index 800bc1cc92da..14f24bf50b25 100644
--- a/docs/core_docs/docs/integrations/tools/exa_search.mdx
+++ b/docs/core_docs/docs/integrations/tools/exa_search.mdx
@@ -68,3 +68,8 @@ import AgentExample from "@examples/tools/exa_agent.ts";
:::tip
You can see a LangSmith trace for this example [here](https://smith.langchain.com/public/d123ba5f-8535-4669-9e43-ac7ab3c6735e/r).
:::
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/gmail.mdx b/docs/core_docs/docs/integrations/tools/gmail.mdx
index 2e953f1ead94..b92399a484e8 100644
--- a/docs/core_docs/docs/integrations/tools/gmail.mdx
+++ b/docs/core_docs/docs/integrations/tools/gmail.mdx
@@ -29,3 +29,8 @@ npm install @langchain/openai googleapis @langchain/community
import ToolExample from "@examples/tools/gmail.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/google_calendar.mdx b/docs/core_docs/docs/integrations/tools/google_calendar.mdx
index e92c422a49dc..7b4f6107be63 100644
--- a/docs/core_docs/docs/integrations/tools/google_calendar.mdx
+++ b/docs/core_docs/docs/integrations/tools/google_calendar.mdx
@@ -29,3 +29,8 @@ npm install @langchain/openai
```
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/google_places.mdx b/docs/core_docs/docs/integrations/tools/google_places.mdx
index 7e9599e8f6d5..fe39e3aee9b8 100644
--- a/docs/core_docs/docs/integrations/tools/google_places.mdx
+++ b/docs/core_docs/docs/integrations/tools/google_places.mdx
@@ -28,3 +28,8 @@ npm install @langchain/openai @langchain/community
import ToolExample from "@examples/tools/google_places.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/google_routes.mdx b/docs/core_docs/docs/integrations/tools/google_routes.mdx
index 1f13182414be..16177d2ea710 100644
--- a/docs/core_docs/docs/integrations/tools/google_routes.mdx
+++ b/docs/core_docs/docs/integrations/tools/google_routes.mdx
@@ -28,3 +28,8 @@ npm install @langchain/openai @langchain/community
import ToolExample from "@examples/tools/google_routes.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/lambda_agent.mdx b/docs/core_docs/docs/integrations/tools/lambda_agent.mdx
index 70f14cc1560f..bf07d3878932 100644
--- a/docs/core_docs/docs/integrations/tools/lambda_agent.mdx
+++ b/docs/core_docs/docs/integrations/tools/lambda_agent.mdx
@@ -54,3 +54,8 @@ const input = `Find out the capital of Croatia. Once you have it, email the answ
const result = await executor.invoke({ input });
console.log(result);
```
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx b/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx
index f67efceeca5e..65667106c599 100644
--- a/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx
+++ b/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx
@@ -26,3 +26,8 @@ npm install @langchain/openai
```
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/searchapi.mdx b/docs/core_docs/docs/integrations/tools/searchapi.mdx
index fbd44098e402..acc7ad0d75e4 100644
--- a/docs/core_docs/docs/integrations/tools/searchapi.mdx
+++ b/docs/core_docs/docs/integrations/tools/searchapi.mdx
@@ -25,3 +25,8 @@ npm install @langchain/openai
```
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/searxng.mdx b/docs/core_docs/docs/integrations/tools/searxng.mdx
index 08429257be96..b02102137404 100644
--- a/docs/core_docs/docs/integrations/tools/searxng.mdx
+++ b/docs/core_docs/docs/integrations/tools/searxng.mdx
@@ -23,3 +23,8 @@ npm install @langchain/openai
```
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/stackexchange.mdx b/docs/core_docs/docs/integrations/tools/stackexchange.mdx
index b6f3f65b4e73..602003fc067c 100644
--- a/docs/core_docs/docs/integrations/tools/stackexchange.mdx
+++ b/docs/core_docs/docs/integrations/tools/stackexchange.mdx
@@ -13,3 +13,8 @@ The StackExchange tool connects your agents and chains to StackExchange's API.
import ToolExample from "@examples/tools/stackexchange.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/tavily_search.mdx b/docs/core_docs/docs/integrations/tools/tavily_search.mdx
index 1629f37c17d7..c61aaf953057 100644
--- a/docs/core_docs/docs/integrations/tools/tavily_search.mdx
+++ b/docs/core_docs/docs/integrations/tools/tavily_search.mdx
@@ -27,3 +27,8 @@ npm install @langchain/openai @langchain/community
import ToolExample from "@examples/tools/tavily_search.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/webbrowser.mdx b/docs/core_docs/docs/integrations/tools/webbrowser.mdx
index 1d625308827a..eccec0d797ee 100644
--- a/docs/core_docs/docs/integrations/tools/webbrowser.mdx
+++ b/docs/core_docs/docs/integrations/tools/webbrowser.mdx
@@ -44,3 +44,8 @@ npm install @langchain/openai
import AgentExample from "@examples/agents/mrkl_browser.ts";
{AgentExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/wikipedia.mdx b/docs/core_docs/docs/integrations/tools/wikipedia.mdx
index a58beeda24ad..67a221fd3341 100644
--- a/docs/core_docs/docs/integrations/tools/wikipedia.mdx
+++ b/docs/core_docs/docs/integrations/tools/wikipedia.mdx
@@ -13,3 +13,8 @@ The `WikipediaQueryRun` tool connects your agents and chains to Wikipedia.
import ToolExample from "@examples/tools/wikipedia.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/wolframalpha.mdx b/docs/core_docs/docs/integrations/tools/wolframalpha.mdx
index ee6631cbf9a6..bb379c47b609 100644
--- a/docs/core_docs/docs/integrations/tools/wolframalpha.mdx
+++ b/docs/core_docs/docs/integrations/tools/wolframalpha.mdx
@@ -17,3 +17,8 @@ You'll need to create an app from the [WolframAlpha portal](https://developer.wo
import ToolExample from "@examples/tools/wolframalpha.ts";
{ToolExample}
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/tools/zapier_agent.mdx b/docs/core_docs/docs/integrations/tools/zapier_agent.mdx
index 399af722a0b0..b7e91629c3df 100644
--- a/docs/core_docs/docs/integrations/tools/zapier_agent.mdx
+++ b/docs/core_docs/docs/integrations/tools/zapier_agent.mdx
@@ -66,3 +66,8 @@ const result = await executor.invoke({ input });
console.log(`Got output ${result.output}`);
```
+
+## Related
+
+- Tool [conceptual guide](/docs/concepts/#tools)
+- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/docs/core_docs/docs/integrations/vectorstores/analyticdb.mdx b/docs/core_docs/docs/integrations/vectorstores/analyticdb.mdx
index 747c9a1df524..7bc78bd12643 100644
--- a/docs/core_docs/docs/integrations/vectorstores/analyticdb.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/analyticdb.mdx
@@ -52,3 +52,8 @@ User-generated data such as usernames should not be used as input for the collec
import UsageExample from "@examples/indexes/vector_stores/analyticdb.ts";
{UsageExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/astradb.mdx b/docs/core_docs/docs/integrations/vectorstores/astradb.mdx
index 3c5a21a1e8eb..edf42acfe838 100644
--- a/docs/core_docs/docs/integrations/vectorstores/astradb.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/astradb.mdx
@@ -55,3 +55,8 @@ vector store is first created as part of the `CreateCollectionOptions`:
metric?: "cosine" | "euclidean" | "dot_product";
};
```
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/azure_aisearch.mdx b/docs/core_docs/docs/integrations/vectorstores/azure_aisearch.mdx
index 0f3f71a374fb..66fa9ae594f3 100644
--- a/docs/core_docs/docs/integrations/vectorstores/azure_aisearch.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/azure_aisearch.mdx
@@ -45,3 +45,8 @@ Below is an example that indexes documents from a file in Azure AI Search, runs
import Example from "@examples/indexes/vector_stores/azure_aisearch/azure_aisearch.ts";
{Example}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/azure_cosmosdb.mdx b/docs/core_docs/docs/integrations/vectorstores/azure_cosmosdb.mdx
index 47e9ce8e0612..db044c9d678f 100644
--- a/docs/core_docs/docs/integrations/vectorstores/azure_cosmosdb.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/azure_cosmosdb.mdx
@@ -35,3 +35,8 @@ based on the retrieved documents.
import Example from "@examples/indexes/vector_stores/azure_cosmosdb/azure_cosmosdb.ts";
{Example}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/cassandra.mdx b/docs/core_docs/docs/integrations/vectorstores/cassandra.mdx
index 7446a3ceeb19..e421704fcae8 100644
--- a/docs/core_docs/docs/integrations/vectorstores/cassandra.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/cassandra.mdx
@@ -220,3 +220,8 @@ In the configuration document, further optional parameters are provided; their d
| `maxConcurrency` | How many concurrent requests will be sent to Cassandra at a given time. |
| `batchSize` | How many documents will be sent on a single request to Cassandra. When using a value > 1, you should ensure your batch size will not exceed the Cassandra parameter `batch_size_fail_threshold_in_kb`. Batches are unlogged. |
| `withClause` | Cassandra tables may be created with an optional `WITH` clause; this is generally not needed but provided for completeness. |
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/chroma.mdx b/docs/core_docs/docs/integrations/vectorstores/chroma.mdx
index 3531ef11434d..a3785cd8d700 100644
--- a/docs/core_docs/docs/integrations/vectorstores/chroma.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/chroma.mdx
@@ -89,3 +89,8 @@ import Search from "@examples/indexes/vector_stores/chroma/search.ts";
import Delete from "@examples/indexes/vector_stores/chroma/delete.ts";
{Delete}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/clickhouse.mdx b/docs/core_docs/docs/integrations/vectorstores/clickhouse.mdx
index 549a2bf0aabe..ca63a4d9d068 100644
--- a/docs/core_docs/docs/integrations/vectorstores/clickhouse.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/clickhouse.mdx
@@ -43,3 +43,8 @@ import InsertExample from "@examples/indexes/vector_stores/clickhouse_fromTexts.
import SearchExample from "@examples/indexes/vector_stores/clickhouse_search.ts";
{SearchExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/closevector.mdx b/docs/core_docs/docs/integrations/vectorstores/closevector.mdx
index fcbc4346c951..030307f10d9b 100644
--- a/docs/core_docs/docs/integrations/vectorstores/closevector.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/closevector.mdx
@@ -57,3 +57,8 @@ import ExampleCloud from "@examples/indexes/vector_stores/closevector_saveload_f
import ExampleSave from "@examples/indexes/vector_stores/closevector_saveload.ts";
{ExampleSave}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/cloudflare_vectorize.mdx b/docs/core_docs/docs/integrations/vectorstores/cloudflare_vectorize.mdx
index e4886a9969ab..2b5c208ea124 100644
--- a/docs/core_docs/docs/integrations/vectorstores/cloudflare_vectorize.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/cloudflare_vectorize.mdx
@@ -69,3 +69,8 @@ import Example from "@examples/indexes/vector_stores/cloudflare_vectorize/exampl
You can also pass a `filter` parameter to filter by previously loaded metadata.
See [the official documentation](https://developers.cloudflare.com/vectorize/learning/metadata-filtering/)
for information on the required format.
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/convex.mdx b/docs/core_docs/docs/integrations/vectorstores/convex.mdx
index 9f71ef544754..97c7d85c9058 100644
--- a/docs/core_docs/docs/integrations/vectorstores/convex.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/convex.mdx
@@ -70,3 +70,8 @@ import Search from "@examples/indexes/vector_stores/convex/search.ts";
{Search}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/couchbase.mdx b/docs/core_docs/docs/integrations/vectorstores/couchbase.mdx
index f495b50959df..88878de81aad 100644
--- a/docs/core_docs/docs/integrations/vectorstores/couchbase.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/couchbase.mdx
@@ -367,3 +367,8 @@ To create Child Mappings, you can refer to the following docs -
- [Couchbase Capella](https://docs.couchbase.com/cloud/search/create-child-mapping.html)
- [Couchbase Server](https://docs.couchbase.com/server/current/fts/fts-creating-index-from-UI-classic-editor-dynamic.html)
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/elasticsearch.mdx b/docs/core_docs/docs/integrations/vectorstores/elasticsearch.mdx
index 9606b8737e3e..608635e3d7a8 100644
--- a/docs/core_docs/docs/integrations/vectorstores/elasticsearch.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/elasticsearch.mdx
@@ -42,3 +42,8 @@ npm install @langchain/openai
```
{FromDocs}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/faiss.mdx b/docs/core_docs/docs/integrations/vectorstores/faiss.mdx
index 24172dd082bc..eca8a4f6d69b 100644
--- a/docs/core_docs/docs/integrations/vectorstores/faiss.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/faiss.mdx
@@ -73,3 +73,8 @@ import ExampleSave from "@examples/indexes/vector_stores/faiss_saveload.ts";
import ExamplePython from "@examples/indexes/vector_stores/faiss_loadfrompython.ts";
{ExamplePython}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx b/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx
index 7636ef2afeb3..afb9961bf64e 100644
--- a/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx
@@ -186,3 +186,8 @@ const oldResults: IdDocument[] = await engine.similaritySearch("this", 10);
const oldIds = oldResults.map((doc) => doc.id!);
await engine.delete({ ids: oldIds });
```
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx b/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx
index 02eb917d20da..55861122fab2 100644
--- a/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx
@@ -46,3 +46,8 @@ import ExampleBasic from "@examples/indexes/vector_stores/hana_vector/basics.ts"
import ExampleChain from "@examples/indexes/vector_stores/hana_vector/chains.ts";
{ExampleChain}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/hnswlib.mdx b/docs/core_docs/docs/integrations/vectorstores/hnswlib.mdx
index 992e55b17ca0..e2b6d152ff16 100644
--- a/docs/core_docs/docs/integrations/vectorstores/hnswlib.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/hnswlib.mdx
@@ -65,3 +65,8 @@ import ExampleFilter from "@examples/indexes/vector_stores/hnswlib_filter.ts";
import ExampleDelete from "@examples/indexes/vector_stores/hnswlib_delete.ts";
{ExampleDelete}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx b/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx
index 82d228f33808..8cc1b2cc401c 100644
--- a/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx
@@ -45,3 +45,8 @@ import ExampleLoader from "@examples/indexes/vector_stores/lancedb/fromDocs.ts";
import ExampleLoad from "@examples/indexes/vector_stores/lancedb/load.ts";
{ExampleLoad}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/memory.mdx b/docs/core_docs/docs/integrations/vectorstores/memory.mdx
index df34fb92a72c..76078538e203 100644
--- a/docs/core_docs/docs/integrations/vectorstores/memory.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/memory.mdx
@@ -49,3 +49,8 @@ const vectorStore = await MemoryVectorStore.fromTexts(
const resultOne = await vectorStore.similaritySearch("hello world", 1);
console.log(resultOne);
```
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/milvus.mdx b/docs/core_docs/docs/integrations/vectorstores/milvus.mdx
index 22f6d188319e..35e712de1f04 100644
--- a/docs/core_docs/docs/integrations/vectorstores/milvus.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/milvus.mdx
@@ -99,3 +99,8 @@ const vectorStore = await Milvus.fromExistingCollection(
const response = await vectorStore.similaritySearch("scared", 2);
```
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/momento_vector_index.mdx b/docs/core_docs/docs/integrations/vectorstores/momento_vector_index.mdx
index 7783f152b9c9..ac651390d2ec 100644
--- a/docs/core_docs/docs/integrations/vectorstores/momento_vector_index.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/momento_vector_index.mdx
@@ -76,3 +76,8 @@ import DocsExample from "@examples/indexes/vector_stores/momento_vector_index/fr
import ExistingExample from "@examples/indexes/vector_stores/momento_vector_index/fromExisting.ts";
{ExistingExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.mdx b/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.mdx
index 54d3d3b6ae48..dfe680b54353 100644
--- a/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.mdx
@@ -124,3 +124,8 @@ Then, in your code you can use [MQL Query Operators](https://www.mongodb.com/doc
import MetadataExample from "@examples/indexes/vector_stores/mongodb_metadata_filtering.ts";
{MetadataExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/myscale.mdx b/docs/core_docs/docs/integrations/vectorstores/myscale.mdx
index e9c3237135e6..c13435f153fc 100644
--- a/docs/core_docs/docs/integrations/vectorstores/myscale.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/myscale.mdx
@@ -37,3 +37,8 @@ import InsertExample from "@examples/indexes/vector_stores/myscale_fromTexts.ts"
import SearchExample from "@examples/indexes/vector_stores/myscale_search.ts";
{SearchExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/neo4jvector.mdx b/docs/core_docs/docs/integrations/vectorstores/neo4jvector.mdx
index b9a4ea52a538..e83ff6d74ce5 100644
--- a/docs/core_docs/docs/integrations/vectorstores/neo4jvector.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/neo4jvector.mdx
@@ -78,3 +78,8 @@ limit the permissions granted to the credentials used with this tool.
For example, creating read-only users for the database is a good way to
ensure that the calling code cannot mutate or delete data.
See the [security page](/docs/security) for more information.
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/neon.mdx b/docs/core_docs/docs/integrations/vectorstores/neon.mdx
index 8e17cdfe7c18..048db6a27ed2 100644
--- a/docs/core_docs/docs/integrations/vectorstores/neon.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/neon.mdx
@@ -53,3 +53,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/indexes/vector_stores/neon/example.ts";
{Example}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx b/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx
index 29881fb5bc70..1275e51daf18 100644
--- a/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx
@@ -115,3 +115,8 @@ console.log(JSON.stringify(response, null, 2));
}
*/
```
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/pgvector.mdx b/docs/core_docs/docs/integrations/vectorstores/pgvector.mdx
index 9921c67b3d05..049ac928db79 100644
--- a/docs/core_docs/docs/integrations/vectorstores/pgvector.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/pgvector.mdx
@@ -94,3 +94,8 @@ More info at the [`Pgvector GitHub project`](https://github.com/pgvector/pgvecto
import HnswExample from "@examples/indexes/vector_stores/pgvector_vectorstore/pgvector_hnsw.ts";
{HnswExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/pinecone.mdx b/docs/core_docs/docs/integrations/vectorstores/pinecone.mdx
index 2498b5928ea4..d7654489f435 100644
--- a/docs/core_docs/docs/integrations/vectorstores/pinecone.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/pinecone.mdx
@@ -45,3 +45,8 @@ that are most similar to the inputs, then reranks and optimizes for diversity.
import MMRExample from "@examples/indexes/vector_stores/pinecone/mmr.ts";
{MMRExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/prisma.mdx b/docs/core_docs/docs/integrations/vectorstores/prisma.mdx
index 142be61dd32b..b8f479629337 100644
--- a/docs/core_docs/docs/integrations/vectorstores/prisma.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/prisma.mdx
@@ -97,3 +97,8 @@ The samples above use the following schema:
{Schema}
You can remove `namespace` if you don't need it.
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/qdrant.mdx b/docs/core_docs/docs/integrations/vectorstores/qdrant.mdx
index faa88ee3eafa..02cb4d7ac80b 100644
--- a/docs/core_docs/docs/integrations/vectorstores/qdrant.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/qdrant.mdx
@@ -51,3 +51,8 @@ import DocsExample from "@examples/indexes/vector_stores/qdrant/fromDocs.ts";
import ExistingExample from "@examples/indexes/vector_stores/qdrant/fromExisting.ts";
{ExistingExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/redis.mdx b/docs/core_docs/docs/integrations/vectorstores/redis.mdx
index fef8efb1c5a3..5a49a77169f2 100644
--- a/docs/core_docs/docs/integrations/vectorstores/redis.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/redis.mdx
@@ -57,3 +57,8 @@ import IndexOptions from "@examples/indexes/vector_stores/redis/redis_index_opti
import DeleteExample from "@examples/indexes/vector_stores/redis/redis_delete.ts";
{DeleteExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/rockset.mdx b/docs/core_docs/docs/integrations/vectorstores/rockset.mdx
index fc4169a11ef7..6be24a678295 100644
--- a/docs/core_docs/docs/integrations/vectorstores/rockset.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/rockset.mdx
@@ -32,3 +32,8 @@ import UsageExample from "@examples/indexes/vector_stores/rockset.ts";
Below is an example showcasing how to use OpenAI and Rockset to answer questions about a text file:
{UsageExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/singlestore.mdx b/docs/core_docs/docs/integrations/vectorstores/singlestore.mdx
index 451456f27623..f8ad18462e72 100644
--- a/docs/core_docs/docs/integrations/vectorstores/singlestore.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/singlestore.mdx
@@ -79,3 +79,8 @@ These versatile strategies empower users to fine-tune searches according to thei
Moreover, SingleStoreDB's hybrid approaches, exemplified by `FILTER_BY_TEXT`, `FILTER_BY_VECTOR`, and `WEIGHTED_SUM` strategies, seamlessly blend vector and text-based searches to maximize efficiency and accuracy, ensuring users can fully leverage the platform's capabilities for a wide range of applications.
{HybridSearchUsageExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/supabase.mdx b/docs/core_docs/docs/integrations/vectorstores/supabase.mdx
index 83d7bb006157..86583e975953 100644
--- a/docs/core_docs/docs/integrations/vectorstores/supabase.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/supabase.mdx
@@ -105,3 +105,8 @@ You can use maximal marginal relevance search, which optimizes for similarity to
### Document deletion
{DeletionExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/tigris.mdx b/docs/core_docs/docs/integrations/vectorstores/tigris.mdx
index dd3185a027c8..fea10b7c33af 100644
--- a/docs/core_docs/docs/integrations/vectorstores/tigris.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/tigris.mdx
@@ -120,3 +120,8 @@ console.log(JSON.stringify(results, null, 2));
]
*/
```
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/turbopuffer.mdx b/docs/core_docs/docs/integrations/vectorstores/turbopuffer.mdx
index 1461239a41c5..6c31c2bb94f9 100644
--- a/docs/core_docs/docs/integrations/vectorstores/turbopuffer.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/turbopuffer.mdx
@@ -22,3 +22,8 @@ keep in mind that currently only string values are supported.
See [here for more information](https://turbopuffer.com/docs/reference/query#filter-parameters) on acceptable filter formats.
{SimilaritySearchExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/typeorm.mdx b/docs/core_docs/docs/integrations/vectorstores/typeorm.mdx
index d140d06a9914..d6ed0677d4e8 100644
--- a/docs/core_docs/docs/integrations/vectorstores/typeorm.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/typeorm.mdx
@@ -45,3 +45,8 @@ import Example from "@examples/indexes/vector_stores/typeorm_vectorstore/typeorm
One complete example of using `TypeORMVectorStore` is the following:
{Example}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/typesense.mdx b/docs/core_docs/docs/integrations/vectorstores/typesense.mdx
index e0a0b7a45395..5b28c202855d 100644
--- a/docs/core_docs/docs/integrations/vectorstores/typesense.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/typesense.mdx
@@ -138,3 +138,8 @@ Before starting, create a schema in Typesense with an id, a field for the vector
- `static async fromTexts(texts: string[], metadatas: object[], embeddings: Embeddings, config: TypesenseConfig): Promise<Typesense>`: Creates a Typesense vector store from a list of texts and associated metadata. Texts are converted to documents and added to the vector store during construction.
- `async similaritySearch(query: string, k?: number, filter?: Record<string, unknown>): Promise<Document[]>`: Searches for similar documents based on a query. Returns an array of similar documents.
- `async deleteDocuments(documentIds: string[]): Promise<void>`: Deletes documents from the vector store based on their IDs.
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/upstash.mdx b/docs/core_docs/docs/integrations/vectorstores/upstash.mdx
index 878209f30e54..8327da74daaf 100644
--- a/docs/core_docs/docs/integrations/vectorstores/upstash.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/upstash.mdx
@@ -60,3 +60,8 @@ It's possible to use the embeddings service of Upstash, which is based on the em
You can also delete the documents you've indexed previously.
{DeleteExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/usearch.mdx b/docs/core_docs/docs/integrations/vectorstores/usearch.mdx
index 27bc930b8a66..daf94caffc8c 100644
--- a/docs/core_docs/docs/integrations/vectorstores/usearch.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/usearch.mdx
@@ -41,3 +41,8 @@ import ExampleTexts from "@examples/indexes/vector_stores/usearch.ts";
import ExampleLoader from "@examples/indexes/vector_stores/usearch_fromdocs.ts";
{ExampleLoader}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/vectara.mdx b/docs/core_docs/docs/integrations/vectorstores/vectara.mdx
index 0b1f0372d233..e77e9b18198e 100644
--- a/docs/core_docs/docs/integrations/vectorstores/vectara.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/vectara.mdx
@@ -59,3 +59,8 @@ Vectara's LangChain vector store consumes Vectara's core APIs:
- [Indexing API](https://docs.vectara.com/docs/indexing-apis/indexing) for storing documents in a Vectara corpus.
- [Search API](https://docs.vectara.com/docs/search-apis/search) for querying this data. This API supports hybrid search.
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/vercel_postgres.mdx b/docs/core_docs/docs/integrations/vectorstores/vercel_postgres.mdx
index d79030ebdeba..784249802249 100644
--- a/docs/core_docs/docs/integrations/vectorstores/vercel_postgres.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/vercel_postgres.mdx
@@ -50,3 +50,8 @@ import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/indexes/vector_stores/vercel_postgres/example.ts";
{Example}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/voy.mdx b/docs/core_docs/docs/integrations/vectorstores/voy.mdx
index 8927bcd0b830..1058a348c5fe 100644
--- a/docs/core_docs/docs/integrations/vectorstores/voy.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/voy.mdx
@@ -20,3 +20,8 @@ npm install @langchain/openai voy-search @langchain/community
import Example from "@examples/indexes/vector_stores/voy.ts";
{Example}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/weaviate.mdx b/docs/core_docs/docs/integrations/vectorstores/weaviate.mdx
index 7271ff7d03fb..0363e3fdd63e 100644
--- a/docs/core_docs/docs/integrations/vectorstores/weaviate.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/weaviate.mdx
@@ -50,3 +50,8 @@ You can use maximal marginal relevance search, which optimizes for similarity to
import DeleteExample from "@examples/indexes/vector_stores/weaviate_delete.ts";
{DeleteExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/xata.mdx b/docs/core_docs/docs/integrations/vectorstores/xata.mdx
index dacc2be04712..a84e14e71323 100644
--- a/docs/core_docs/docs/integrations/vectorstores/xata.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/xata.mdx
@@ -58,3 +58,8 @@ This example shows how to implement semantic search using LangChain.js and Xata.
import SimSearch from "@examples/indexes/vector_stores/xata_metadata.ts";
{SimSearch}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/zep.mdx b/docs/core_docs/docs/integrations/vectorstores/zep.mdx
index b384eb8ffad2..f7aeacf4ae9f 100644
--- a/docs/core_docs/docs/integrations/vectorstores/zep.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/zep.mdx
@@ -62,3 +62,8 @@ import ExampleMetadata from "@examples/indexes/vector_stores/zep/zep_with_metada
import ExampleOpenAI from "@examples/indexes/vector_stores/zep/zep_with_openai_embeddings.ts";
{ExampleOpenAI}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/docs/integrations/vectorstores/zep_cloud.mdx b/docs/core_docs/docs/integrations/vectorstores/zep_cloud.mdx
index 23861ac1f1d2..18bd4beb19b0 100644
--- a/docs/core_docs/docs/integrations/vectorstores/zep_cloud.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/zep_cloud.mdx
@@ -55,3 +55,8 @@ import ZepCloudVectorStoreExpressionLanguageExample from "@examples/guides/expre
{ZepCloudVectorStoreExpressionLanguageExample}
+
+## Related
+
+- Vector store [conceptual guide](/docs/concepts/#vectorstores)
+- Vector store [how-to guides](/docs/how_to/#vectorstores)
diff --git a/docs/core_docs/package.json b/docs/core_docs/package.json
index 86526b9483af..7a1ed4c37de7 100644
--- a/docs/core_docs/package.json
+++ b/docs/core_docs/package.json
@@ -22,8 +22,8 @@
"format": "prettier --write \"**/*.{js,jsx,ts,tsx,md,mdx}\"",
"format:check": "prettier --check \"**/*.{js,jsx,ts,tsx,md,mdx}\"",
"clean": "rm -rf .docusaurus/ .turbo/ .build/",
- "quarto": "quarto render docs/ && node ./scripts/quarto-build.js",
- "quarto:vercel": "node ./scripts/quarto-build.js",
+ "quarto": "quarto render docs/ && node ./scripts/quarto-build.js && python3 ./scripts/append_related_links.py ./docs",
+ "quarto:vercel": "node ./scripts/quarto-build.js && python3 ./scripts/append_related_links.py ./docs",
"gen": "yarn gen:supabase",
"gen:supabase": "npx supabase gen types typescript --project-id 'xsqpnijvmbodcxyapnyq' --schema public > ./src/supabase.d.ts",
"broken-links": "node ./scripts/check-broken-links.js",
diff --git a/docs/core_docs/scripts/append_related_links.py b/docs/core_docs/scripts/append_related_links.py
new file mode 100644
index 000000000000..ec2b31e96cc8
--- /dev/null
+++ b/docs/core_docs/scripts/append_related_links.py
@@ -0,0 +1,82 @@
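+"""Append a standardized "Related" links section to rendered integration docs.
+
+Invoked from the docs build (see package.json), e.g.:
+    python3 ./scripts/append_related_links.py ./docs
+"""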
+import itertools
+import multiprocessing
+import re
+import sys
+from pathlib import Path
+
+
+def _generate_related_links_section(integration_type: str, notebook_name: str):
+ concept_display_name = None
+ concept_heading = None
+ if integration_type == "chat":
+ concept_display_name = "Chat model"
+ concept_heading = "chat-models"
+ elif integration_type == "llms":
+ concept_display_name = "LLM"
+ concept_heading = "llms"
+ elif integration_type == "text_embedding":
+ concept_display_name = "Embedding model"
+ concept_heading = "embedding-models"
+ elif integration_type == "document_loaders":
+ concept_display_name = "Document loader"
+ concept_heading = "document-loaders"
+ elif integration_type == "vectorstores":
+ concept_display_name = "Vector store"
+ concept_heading = "vectorstores"
+ elif integration_type == "retrievers":
+ concept_display_name = "Retriever"
+ concept_heading = "retrievers"
+ elif integration_type == "tools":
+ concept_display_name = "Tool"
+ concept_heading = "tools"
+ elif integration_type == "stores":
+ concept_display_name = "Key-value store"
+ concept_heading = "key-value-stores"
+ # Special case because there are no key-value store how-tos yet
+ return f"""## Related
+
+- [{concept_display_name} conceptual guide](/docs/concepts/#{concept_heading})
+"""
+ else:
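+        # Unknown integration types are skipped entirely (no section appended).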
+ return None
+ return f"""## Related
+
+- {concept_display_name} [conceptual guide](/docs/concepts/#{concept_heading})
+- {concept_display_name} [how-to guides](/docs/how_to/#{concept_heading})
+"""
+
+
+def _process_path(doc_path: Path):
+ content = doc_path.read_text()
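+    # Don't double-append: pages that already have a Related section are skipped below.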
+ has_related = "## Related" in content
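+    # Group 1 captures the integration type (e.g. "tools"); group 2 the page name.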
+    pattern = r"docs/integrations/([^/]+)/([^/]+)\.mdx?"
+ match = re.search(pattern, str(doc_path))
+ if match and match.group(2) != "index" and not has_related:
+ integration_type = match.group(1)
+ notebook_name = match.group(2)
+ related_links_section = _generate_related_links_section(
+ integration_type, notebook_name
+ )
+ if related_links_section:
+ content = content + "\n\n" + related_links_section
+ doc_path.write_text(content)
+
+
+if __name__ == "__main__":
+ output_docs_dir = Path(sys.argv[1])
+
+ mds = output_docs_dir.rglob("integrations/**/*.md")
+ mdxs = output_docs_dir.rglob("integrations/**/*.mdx")
+ paths = itertools.chain(mds, mdxs)
+ # modify all md files in place
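+    # Each file is independent, so fan the work out across a process pool.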
+ with multiprocessing.Pool() as pool:
+ pool.map(_process_path, paths)
diff --git a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb
index 5a296128d703..f0630d05424f 100644
--- a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb
+++ b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb
@@ -33,6 +33,9 @@
"| [__module_name__](__api_ref_module__) | [__package_name__](__api_ref_package__) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n",
"\n",
"### Model features\n",
+ "\n",
+ "See the links in the table headers below for guides on how to use specific features.\n",
+ "\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| __tool_calling__ | __tool_calling__ | __json_mode__ | __image_input__ | __audio_input__ | __video_input__ | __token_level_streaming__ | __token_usage__ | __logprobs__ | \n",