diff --git a/docs/core_docs/docs/integrations/chat/mistral.ipynb b/docs/core_docs/docs/integrations/chat/mistral.ipynb
index 8f2a6fa16f6f..9822a2aa8b4d 100644
--- a/docs/core_docs/docs/integrations/chat/mistral.ipynb
+++ b/docs/core_docs/docs/integrations/chat/mistral.ipynb
@@ -42,7 +42,7 @@
"\n",
"## Setup\n",
"\n",
- "To access `ChatMistralAI` models you'll need to create a `ChatMistralAI` account, get an API key, and install the `@langchain/mistralai` integration package.\n",
+ "To access Mistral AI models you'll need to create a Mistral AI account, get an API key, and install the `@langchain/mistralai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
diff --git a/libs/langchain-community/src/chat_models/fireworks.ts b/libs/langchain-community/src/chat_models/fireworks.ts
index f6bf28824258..36878eeced80 100644
--- a/libs/langchain-community/src/chat_models/fireworks.ts
+++ b/libs/langchain-community/src/chat_models/fireworks.ts
@@ -413,6 +413,8 @@ export type ChatFireworksCallOptions = Partial<
*
*
*
+ *
+ *
* Usage Metadata
*
* ```typescript
diff --git a/libs/langchain-community/src/chat_models/togetherai.ts b/libs/langchain-community/src/chat_models/togetherai.ts
index b377024406a1..9d512e9a27a9 100644
--- a/libs/langchain-community/src/chat_models/togetherai.ts
+++ b/libs/langchain-community/src/chat_models/togetherai.ts
@@ -44,23 +44,371 @@ export interface ChatTogetherAIInput
}
/**
- * Wrapper around TogetherAI API for large language models fine-tuned for chat
+ * TogetherAI chat model integration.
*
- * TogetherAI API is compatible to the OpenAI API with some limitations. View the
+ * The TogetherAI API is compatible with the OpenAI API with some limitations. View the
* full API ref at:
* @link {https://docs.together.ai/reference/chat-completions}
*
- * To use, you should have the `TOGETHER_AI_API_KEY` environment variable set.
- * @example
+ * Setup:
+ * Install `@langchain/community` and set an environment variable named `TOGETHER_AI_API_KEY`.
+ *
+ * ```bash
+ * npm install @langchain/community
+ * export TOGETHER_AI_API_KEY="your-api-key"
+ * ```
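+ *
+ * The key can also be passed directly via the `apiKey` constructor field instead of the environment variable (a minimal sketch):
+ *
+ * ```typescript
+ * import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
+ *
+ * // Explicit key takes the place of TOGETHER_AI_API_KEY
+ * const llmWithExplicitKey = new ChatTogetherAI({ apiKey: "your-api-key" });
+ * ```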
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_community.chat_models_togetherai.ChatTogetherAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_community.chat_models_togetherai.ChatTogetherAICallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ * stop: ["\n"],
+ * tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ * [...],
+ * {
+ * tool_choice: "auto",
+ * }
+ * );
+ * ```
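+ *
+ * Call options can also be passed per-call as the second argument, without binding; a minimal sketch, reusing the `llm` and `input` defined in the examples below:
+ *
+ * ```typescript
+ * // Options passed here apply to this invocation only
+ * const stoppedResult = await llm.invoke(input, { stop: ["\n"] });
+ * ```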
+ *
+ * ## Examples
+ *
+ *
+ * Instantiate
+ *
* ```typescript
- * const model = new ChatTogetherAI({
- * temperature: 0.9,
- * apiKey: process.env.TOGETHER_AI_API_KEY,
+ * import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
+ *
+ * const llm = new ChatTogetherAI({
+ * model: "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ * temperature: 0,
+ * // other params...
* });
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Invoking
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessage {
+ * "id": "8b23ea7bcc4c924b-MUC",
+ * "content": "\"J'adore programmer\"",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "tokenUsage": {
+ * "completionTokens": 8,
+ * "promptTokens": 19,
+ * "totalTokens": 27
+ * },
+ * "finish_reason": "eos"
+ * },
+ * "tool_calls": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 19,
+ * "output_tokens": 8,
+ * "total_tokens": 27
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Streaming Chunks
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ * console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "\"",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "J",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "'",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "ad",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "ore",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": " programmer",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "\"",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": null
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "id": "8b23eb602fb19263-MUC",
+ * "content": "",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": "eos"
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": "",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 19,
+ * "output_tokens": 8,
+ * "total_tokens": 27
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Aggregate Streamed Chunks
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ * full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ * "id": "8b23ecd42e469236-MUC",
+ * "content": "\"J'adore programmer\"",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0,
+ * "finish_reason": "eos"
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 19,
+ * "output_tokens": 8,
+ * "total_tokens": 27
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Bind tools
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ * name: "GetWeather",
+ * description: "Get the current weather in a given location",
+ * schema: z.object({
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ * }),
+ * }
+ *
+ * const GetPopulation = {
+ * name: "GetPopulation",
+ * description: "Get the current population in a given location",
+ * schema: z.object({
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ * }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ * "Which city is hotter today and which is bigger: LA or NY? Respond with JSON and use tools."
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ * {
+ * name: 'GetWeather',
+ * args: { location: 'Los Angeles' },
+ * type: 'tool_call',
+ * id: 'call_q8i4zx1udqjjnou2bzbrg8ms'
+ * }
+ * ]
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Structured Output
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ * setup: z.string().describe("The setup of the joke"),
+ * punchline: z.string().describe("The punchline to the joke"),
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
*
- * const response = await model.invoke([new HumanMessage("Hello there!")]);
- * console.log(response);
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
* ```
+ *
+ * ```txt
+ * {
+ * setup: 'Why did the cat join a band',
+ * punchline: 'Because it wanted to be the purr-cussionist'
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Usage Metadata
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 19, output_tokens: 65, total_tokens: 84 }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Response Metadata
+ *
+ * ```typescript
+ * const aiMsgForResponseMetadata = await llm.invoke(input);
+ * console.log(aiMsgForResponseMetadata.response_metadata);
+ * ```
+ *
+ * ```txt
+ * {
+ * tokenUsage: { completionTokens: 91, promptTokens: 19, totalTokens: 110 },
+ * finish_reason: 'eos'
+ * }
+ * ```
+ *
+ *
+ *
*/
export class ChatTogetherAI extends ChatOpenAI {
static lc_name() {
diff --git a/libs/langchain-mistralai/src/chat_models.ts b/libs/langchain-mistralai/src/chat_models.ts
index c78776d4701f..b8d1a6348f70 100644
--- a/libs/langchain-mistralai/src/chat_models.ts
+++ b/libs/langchain-mistralai/src/chat_models.ts
@@ -410,7 +410,330 @@ function _convertToolToMistralTool(
}
/**
- * Integration with a chat model.
+ * Mistral AI chat model integration.
+ *
+ * Setup:
+ * Install `@langchain/mistralai` and set an environment variable named `MISTRAL_API_KEY`.
+ *
+ * ```bash
+ * npm install @langchain/mistralai
+ * export MISTRAL_API_KEY="your-api-key"
+ * ```
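+ *
+ * The key can also be passed directly via the `apiKey` constructor field instead of the environment variable (a minimal sketch):
+ *
+ * ```typescript
+ * import { ChatMistralAI } from '@langchain/mistralai';
+ *
+ * // Explicit key takes the place of MISTRAL_API_KEY
+ * const llmWithExplicitKey = new ChatMistralAI({ apiKey: "your-api-key" });
+ * ```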
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_mistralai.ChatMistralAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_mistralai.ChatMistralAICallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ * stop: ["\n"],
+ * tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ * [...],
+ * {
+ * tool_choice: "auto",
+ * }
+ * );
+ * ```
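+ *
+ * The same works per-call; a minimal sketch, reusing the `llm` and `input` defined in the examples below:
+ *
+ * ```typescript
+ * // Options passed as the second argument apply to this call only
+ * const stoppedResult = await llm.invoke(input, { stop: ["\n"] });
+ * ```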
+ *
+ * ## Examples
+ *
+ *
+ * Instantiate
+ *
+ * ```typescript
+ * import { ChatMistralAI } from '@langchain/mistralai';
+ *
+ * const llm = new ChatMistralAI({
+ * model: "mistral-large-2402",
+ * temperature: 0,
+ * // other params...
+ * });
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Invoking
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessage {
+ * "content": "The translation of \"I love programming\" into French is \"J'aime la programmation\". Here's the breakdown:\n\n- \"I\" translates to \"Je\"\n- \"love\" translates to \"aime\"\n- \"programming\" translates to \"la programmation\"\n\nSo, \"J'aime la programmation\" means \"I love programming\" in French.",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "tokenUsage": {
+ * "completionTokens": 89,
+ * "promptTokens": 13,
+ * "totalTokens": 102
+ * },
+ * "finish_reason": "stop"
+ * },
+ * "tool_calls": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 13,
+ * "output_tokens": 89,
+ * "total_tokens": 102
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Streaming Chunks
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ * console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ * "content": "The",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": " translation",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": " of",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": " \"",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": "I",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": ".",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": "",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 13,
+ * "output_tokens": 89,
+ * "total_tokens": 102
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Aggregate Streamed Chunks
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ * full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ * "content": "The translation of \"I love programming\" into French is \"J'aime la programmation\". Here's the breakdown:\n\n- \"I\" translates to \"Je\"\n- \"love\" translates to \"aime\"\n- \"programming\" translates to \"la programmation\"\n\nSo, \"J'aime la programmation\" means \"I love programming\" in French.",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "prompt": 0,
+ * "completion": 0
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 13,
+ * "output_tokens": 89,
+ * "total_tokens": 102
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Bind tools
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ * name: "GetWeather",
+ * description: "Get the current weather in a given location",
+ * schema: z.object({
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ * }),
+ * }
+ *
+ * const GetPopulation = {
+ * name: "GetPopulation",
+ * description: "Get the current population in a given location",
+ * schema: z.object({
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ * }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ * "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ * {
+ * name: 'GetWeather',
+ * args: { location: 'Los Angeles, CA' },
+ * type: 'tool_call',
+ * id: '47i216yko'
+ * },
+ * {
+ * name: 'GetWeather',
+ * args: { location: 'New York, NY' },
+ * type: 'tool_call',
+ * id: 'nb3v8Fpcn'
+ * },
+ * {
+ * name: 'GetPopulation',
+ * args: { location: 'Los Angeles, CA' },
+ * type: 'tool_call',
+ * id: 'EedWzByIB'
+ * },
+ * {
+ * name: 'GetPopulation',
+ * args: { location: 'New York, NY' },
+ * type: 'tool_call',
+ * id: 'jLdLia7zC'
+ * }
+ * ]
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Structured Output
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ * setup: z.string().describe("The setup of the joke"),
+ * punchline: z.string().describe("The punchline to the joke"),
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ * setup: "Why don't cats play poker in the jungle?",
+ * punchline: 'Too many cheetahs!',
+ * rating: 7
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Usage Metadata
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 13, output_tokens: 89, total_tokens: 102 }
+ * ```
+ *
+ *
+ *
*/
export class ChatMistralAI<
CallOptions extends ChatMistralAICallOptions = ChatMistralAICallOptions
diff --git a/libs/langchain-ollama/src/chat_models.ts b/libs/langchain-ollama/src/chat_models.ts
index 7c42b37ea1f3..8c8ea9fc66d1 100644
--- a/libs/langchain-ollama/src/chat_models.ts
+++ b/libs/langchain-ollama/src/chat_models.ts
@@ -82,22 +82,301 @@ export interface ChatOllamaInput
}
/**
- * Integration with the Ollama SDK.
+ * Ollama chat model integration.
+ *
+ * Setup:
+ * Install `@langchain/ollama` and the Ollama app.
+ *
+ * ```bash
+ * npm install @langchain/ollama
+ *
+ * # Pull the model you want to use, e.g.:
+ * ollama pull llama3.1:8b
+ * ```
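+ *
+ * By default the client talks to a local Ollama server; a minimal sketch, assuming the `baseUrl` constructor field for pointing at a different host:
+ *
+ * ```typescript
+ * import { ChatOllama } from '@langchain/ollama';
+ *
+ * // Point at a non-default Ollama server if needed
+ * const remoteLlm = new ChatOllama({ baseUrl: "http://my-ollama-host:11434" });
+ * ```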
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_ollama.ChatOllama.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_ollama.ChatOllamaCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ * stop: ["\n"],
+ * tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ * [...],
+ * {
+ * tool_choice: "auto",
+ * }
+ * );
+ * ```
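+ *
+ * Per-call usage works the same way; a minimal sketch, reusing the `llm` and `input` defined in the examples below:
+ *
+ * ```typescript
+ * // Options passed as the second argument apply only to this invocation
+ * const stoppedResult = await llm.invoke(input, { stop: ["\n"] });
+ * ```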
+ *
+ * ## Examples
+ *
+ *
+ * Instantiate
*
- * @example
* ```typescript
- * import { ChatOllama } from "@langchain/ollama";
+ * import { ChatOllama } from '@langchain/ollama';
*
- * const model = new ChatOllama({
- * model: "llama3", // Default model.
+ * const llm = new ChatOllama({
+ * model: "llama-3.1:8b",
+ * temperature: 0,
+ * // other params...
* });
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Invoking
*
- * const result = await model.invoke([
- * "human",
- * "What is a good name for a company that makes colorful socks?",
- * ]);
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
* console.log(result);
* ```
+ *
+ * ```txt
+ * AIMessage {
+ * "content": "The translation of \"I love programming\" into French is:\n\n\"J'adore programmer.\"",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "model": "llama3.1:8b",
+ * "created_at": "2024-08-12T22:12:23.09468Z",
+ * "done_reason": "stop",
+ * "done": true,
+ * "total_duration": 3715571291,
+ * "load_duration": 35244375,
+ * "prompt_eval_count": 19,
+ * "prompt_eval_duration": 3092116000,
+ * "eval_count": 20,
+ * "eval_duration": 585789000
+ * },
+ * "tool_calls": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 19,
+ * "output_tokens": 20,
+ * "total_tokens": 39
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Streaming Chunks
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ * console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ * "content": "The",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": " translation",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": " of",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": " \"",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": "I",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * ...
+ * AIMessageChunk {
+ * "content": "",
+ * "additional_kwargs": {},
+ * "response_metadata": {},
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ * "content": "",
+ * "additional_kwargs": {},
+ * "response_metadata": {
+ * "model": "llama3.1:8b",
+ * "created_at": "2024-08-12T22:13:22.22423Z",
+ * "done_reason": "stop",
+ * "done": true,
+ * "total_duration": 8599883208,
+ * "load_duration": 35975875,
+ * "prompt_eval_count": 19,
+ * "prompt_eval_duration": 7918195000,
+ * "eval_count": 20,
+ * "eval_duration": 643569000
+ * },
+ * "tool_calls": [],
+ * "tool_call_chunks": [],
+ * "invalid_tool_calls": [],
+ * "usage_metadata": {
+ * "input_tokens": 19,
+ * "output_tokens": 20,
+ * "total_tokens": 39
+ * }
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Bind tools
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ * name: "GetWeather",
+ * description: "Get the current weather in a given location",
+ * schema: z.object({
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ * }),
+ * }
+ *
+ * const GetPopulation = {
+ * name: "GetPopulation",
+ * description: "Get the current population in a given location",
+ * schema: z.object({
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ * }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ * "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ * {
+ * name: 'GetWeather',
+ * args: { location: 'Los Angeles, CA' },
+ * id: '49410cad-2163-415e-bdcd-d26938a9c8c5',
+ * type: 'tool_call'
+ * },
+ * {
+ * name: 'GetPopulation',
+ * args: { location: 'New York, NY' },
+ * id: '39e230e4-63ec-4fae-9df0-21c3abe735ad',
+ * type: 'tool_call'
+ * }
+ * ]
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Structured Output
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ * setup: z.string().describe("The setup of the joke"),
+ * punchline: z.string().describe("The punchline to the joke"),
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ * punchline: 'Why did the cat join a band? Because it wanted to be the purr-cussionist!',
+ * rating: 8,
+ * setup: 'A cat walks into a music store and asks the owner...'
+ * }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Usage Metadata
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 19, output_tokens: 20, total_tokens: 39 }
+ * ```
+ *
+ *
+ *
+ *
+ *
+ * Response Metadata
+ *
+ * ```typescript
+ * const aiMsgForResponseMetadata = await llm.invoke(input);
+ * console.log(aiMsgForResponseMetadata.response_metadata);
+ * ```
+ *
+ * ```txt
+ * {
+ * model: 'llama3.1:8b',
+ * created_at: '2024-08-12T22:17:42.274795Z',
+ * done_reason: 'stop',
+ * done: true,
+ * total_duration: 6767071209,
+ * load_duration: 31628209,
+ * prompt_eval_count: 19,
+ * prompt_eval_duration: 6124504000,
+ * eval_count: 20,
+ * eval_duration: 608785000
+ * }
+ * ```
+ *
+ *
+ *
*/
export class ChatOllama
extends BaseChatModel